diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index c7344f6bbd7..9351a284c86 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -28,7 +28,7 @@
 //
 // The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
 // it's useful to think about these as the same register, with some uses using
-// the value of the register before the add and some using // it after. In this
+// the value of the register before the add and some using it after. In this
 // example, the icmp is a post-increment user, since it uses %i.next, which is
 // the value of the induction variable after the increment. The other common
 // case of post-increment users is users outside the loop.
@@ -53,31 +53,33 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "loop-reduce"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallBitVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/IVUsers.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Assembly/Writer.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <algorithm>
 using namespace llvm;
 
+#define DEBUG_TYPE "loop-reduce"
+
 /// MaxIVUsers is an arbitrary threshold that provides an early opportunity
 /// to bail out. This threshold is far beyond the number of users that LSR can
 /// conceivably solve, so it should not affect generated code, but catches the
@@ -103,15 +105,36 @@ static bool StressIVChain = false;
 
 namespace {
 
-/// RegSortData - This class holds data which is used to order reuse candidates.
+struct MemAccessTy {
+  /// Used in situations where the accessed memory type is unknown.
+  static const unsigned UnknownAddressSpace = ~0u;
+
+  Type *MemTy;
+  unsigned AddrSpace;
+
+  MemAccessTy() : MemTy(nullptr), AddrSpace(UnknownAddressSpace) {}
+
+  MemAccessTy(Type *Ty, unsigned AS) :
+    MemTy(Ty), AddrSpace(AS) {}
+
+  bool operator==(MemAccessTy Other) const {
+    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
+  }
+
+  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }
+
+  static MemAccessTy getUnknown(LLVMContext &Ctx) {
+    return MemAccessTy(Type::getVoidTy(Ctx), UnknownAddressSpace);
+  }
+};
+
+/// This class holds data which is used to order reuse candidates.
 class RegSortData {
 public:
-  /// UsedByIndices - This represents the set of LSRUse indices which reference
+  /// This represents the set of LSRUse indices which reference
   /// a particular register.
  SmallBitVector UsedByIndices;
 
-  RegSortData() {}
-
   void print(raw_ostream &OS) const;
   void dump() const;
 };
 
@@ -130,8 +153,7 @@ void RegSortData::dump() const {
 
 namespace {
 
-/// RegUseTracker - Map register candidates to information about how they are
-/// used.
+/// Map register candidates to information about how they are used.
 class RegUseTracker {
   typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;
 
@@ -139,9 +161,9 @@ class RegUseTracker {
   SmallVector<const SCEV *, 16> RegSequence;
 
 public:
-  void CountRegister(const SCEV *Reg, size_t LUIdx);
-  void DropRegister(const SCEV *Reg, size_t LUIdx);
-  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);
+  void countRegister(const SCEV *Reg, size_t LUIdx);
+  void dropRegister(const SCEV *Reg, size_t LUIdx);
+  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);
 
   bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
 
@@ -160,7 +182,7 @@ public:
 }
 
 void
-RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
+RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
   std::pair<RegUsesTy::iterator, bool> Pair =
     RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
   RegSortData &RSD = Pair.first->second;
@@ -171,7 +193,7 @@ RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
 }
 
 void
-RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
+RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
   RegUsesTy::iterator It = RegUsesMap.find(Reg);
   assert(It != RegUsesMap.end());
   RegSortData &RSD = It->second;
@@ -180,14 +202,13 @@ RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
 }
 
 void
-RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
+RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
   assert(LUIdx <= LastLUIdx);
 
   // Update RegUses. The data structure is not optimized for this purpose;
   // we must iterate through it and update each of the bit vectors.
-  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
-       I != E; ++I) {
-    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
+  for (auto &Pair : RegUsesMap) {
+    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
     if (LUIdx < UsedByIndices.size())
       UsedByIndices[LUIdx] =
         LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
@@ -220,9 +241,8 @@ void RegUseTracker::clear() {
 
 namespace {
 
-/// Formula - This class holds information that describes a formula for
-/// computing satisfying a use. It may include broken-out immediates and scaled
-/// registers.
+/// This class holds information describing a formula for computing a value
+/// that satisfies a use. It may include broken-out immediates and scaled registers.
 struct Formula {
   /// Global base address used for complex addressing.
   GlobalValue *BaseGV;
@@ -236,29 +256,43 @@ struct Formula {
   /// The scale of any complex addressing.
   int64_t Scale;
 
-  /// BaseRegs - The list of "base" registers for this use. When this is
-  /// non-empty,
+  /// The list of "base" registers for this use. When this is non-empty, the
+  /// canonical representation of a formula is:
+  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
+  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
+  /// #1 enforces that the scaled register is always used when at least two
+  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
+  /// #2 enforces that 1 * reg is reg.
+  /// This invariant can be temporarily broken while building a formula.
+  /// However, every formula inserted into the LSRInstance must be in canonical
+  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;
 
-  /// ScaledReg - The 'scaled' register for this use. This should be non-null
-  /// when Scale is not zero.
+  /// The 'scaled' register for this use. This should be non-null when Scale is
+  /// not zero.
   const SCEV *ScaledReg;
 
-  /// UnfoldedOffset - An additional constant offset which added near the
-  /// use. This requires a temporary register, but the offset itself can
-  /// live in an add immediate field rather than a register.
+  /// An additional constant offset which is added near the use. This requires
+  /// a temporary register, but the offset itself can live in an add immediate
+  /// field rather than a register.
   int64_t UnfoldedOffset;
 
   Formula()
-      : BaseGV(0), BaseOffset(0), HasBaseReg(false), Scale(0), ScaledReg(0),
-        UnfoldedOffset(0) {}
+      : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0),
+        ScaledReg(nullptr), UnfoldedOffset(0) {}
+
+  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
+
+  bool isCanonical() const;
 
-  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
+  void canonicalize();
 
-  unsigned getNumRegs() const;
+  bool unscale();
+
+  size_t getNumRegs() const;
   Type *getType() const;
 
-  void DeleteBaseReg(const SCEV *&S);
+  void deleteBaseReg(const SCEV *&S);
 
   bool referencesReg(const SCEV *S) const;
   bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
@@ -270,7 +304,7 @@ struct Formula {
 
 }
 
-/// DoInitialMatch - Recursion helper for InitialMatch.
+/// Recursion helper for initialMatch.
 static void DoInitialMatch(const SCEV *S, Loop *L,
                            SmallVectorImpl<const SCEV *> &Good,
                            SmallVectorImpl<const SCEV *> &Bad,
@@ -283,9 +317,8 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
 
   // Look at add operands.
   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
-    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
-         I != E; ++I)
-      DoInitialMatch(*I, L, Good, Bad, SE);
+    for (const SCEV *S : Add->operands())
+      DoInitialMatch(S, L, Good, Bad, SE);
     return;
   }
 
@@ -312,12 +345,10 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
       DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
       const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
         SE.getEffectiveSCEVType(NewMul->getType())));
-      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
-           E = MyGood.end(); I != E; ++I)
-        Good.push_back(SE.getMulExpr(NegOne, *I));
-      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
-           E = MyBad.end(); I != E; ++I)
-        Bad.push_back(SE.getMulExpr(NegOne, *I));
+      for (const SCEV *S : MyGood)
+        Good.push_back(SE.getMulExpr(NegOne, S));
+      for (const SCEV *S : MyBad)
+        Bad.push_back(SE.getMulExpr(NegOne, S));
       return;
     }
 
@@ -326,10 +357,9 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
   Bad.push_back(S);
 }
 
-/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
-/// attempting to keep all loop-invariant and loop-computable values in a
-/// single base register.
-void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
+/// Incorporate loop-variant parts of S into this Formula, attempting to keep
+/// all loop-invariant and loop-computable values in a single base register.
+void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
   SmallVector<const SCEV *, 4> Good;
   SmallVector<const SCEV *, 4> Bad;
   DoInitialMatch(S, L, Good, Bad, SE);
@@ -345,47 +375,91 @@ void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
     BaseRegs.push_back(Sum);
     HasBaseReg = true;
   }
+  canonicalize();
+}
+
+/// \brief Check whether or not this formula satisfies the canonical
+/// representation.
+/// \see Formula::BaseRegs.
+bool Formula::isCanonical() const {
+  if (ScaledReg)
+    return Scale != 1 || !BaseRegs.empty();
+  return BaseRegs.size() <= 1;
 }
 
-/// getNumRegs - Return the total number of register operands used by this
-/// formula. This does not include register uses implied by non-constant
-/// addrec strides.
-unsigned Formula::getNumRegs() const {
+/// \brief Helper method to morph a formula into its canonical representation.
+/// \see Formula::BaseRegs.
+/// Every formula having more than one base register must use the ScaledReg
+/// field. Otherwise, we would have to do special cases everywhere in LSR
+/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
+/// On the other hand, 1*reg should be canonicalized into reg.
+void Formula::canonicalize() {
+  if (isCanonical())
+    return;
+  // So far we did not need this case. This is easy to implement but it is
+  // useless to maintain dead code. Besides, it could hurt compile time.
+  assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");
+  // Keep the invariant sums in BaseRegs and one of the variant sums in ScaledReg.
+  ScaledReg = BaseRegs.back();
+  BaseRegs.pop_back();
+  Scale = 1;
+  size_t BaseRegsSize = BaseRegs.size();
+  size_t Try = 0;
+  // If ScaledReg is an invariant, try to find a variant expression.
+  while (Try < BaseRegsSize && !isa<SCEVAddRecExpr>(ScaledReg))
+    std::swap(ScaledReg, BaseRegs[Try++]);
+}
+
+/// \brief Get rid of the scale in the formula.
+/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
+/// \return true if it was possible to get rid of the scale, false otherwise.
+/// \note After this operation the formula may not be in the canonical form.
+bool Formula::unscale() {
+  if (Scale != 1)
+    return false;
+  Scale = 0;
+  BaseRegs.push_back(ScaledReg);
+  ScaledReg = nullptr;
+  return true;
+}
+
+/// Return the total number of register operands used by this formula. This does
+/// not include register uses implied by non-constant addrec strides.
+size_t Formula::getNumRegs() const {
   return !!ScaledReg + BaseRegs.size();
 }
 
-/// getType - Return the type of this formula, if it has one, or null
-/// otherwise. This type is meaningless except for the bit size.
+/// Return the type of this formula, if it has one, or null otherwise. This type
+/// is meaningless except for the bit size.
 Type *Formula::getType() const {
   return !BaseRegs.empty() ? BaseRegs.front()->getType() :
          ScaledReg ? ScaledReg->getType() :
          BaseGV ? BaseGV->getType() :
-         0;
+         nullptr;
 }
 
-/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
-void Formula::DeleteBaseReg(const SCEV *&S) {
+/// Delete the given base reg from the BaseRegs list.
+void Formula::deleteBaseReg(const SCEV *&S) {
   if (&S != &BaseRegs.back())
     std::swap(S, BaseRegs.back());
   BaseRegs.pop_back();
 }
 
-/// referencesReg - Test if this formula references the given register.
+/// Test if this formula references the given register.
 bool Formula::referencesReg(const SCEV *S) const {
   return S == ScaledReg ||
          std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
 }
 
-/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
-/// which are used by uses other than the use with the given index.
+/// Test whether this formula uses registers which are used by uses other than
+/// the use with the given index.
 bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                          const RegUseTracker &RegUses) const {
   if (ScaledReg)
     if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
       return true;
-  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
-       E = BaseRegs.end(); I != E; ++I)
-    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
+  for (const SCEV *BaseReg : BaseRegs)
+    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
       return true;
   return false;
 }
@@ -394,16 +468,15 @@ void Formula::print(raw_ostream &OS) const {
   bool First = true;
   if (BaseGV) {
     if (!First) OS << " + "; else First = false;
-    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
+    BaseGV->printAsOperand(OS, /*PrintType=*/false);
   }
   if (BaseOffset != 0) {
     if (!First) OS << " + "; else First = false;
     OS << BaseOffset;
   }
-  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
-       E = BaseRegs.end(); I != E; ++I) {
+  for (const SCEV *BaseReg : BaseRegs) {
     if (!First) OS << " + "; else First = false;
-    OS << "reg(" << **I << ')';
+    OS << "reg(" << *BaseReg << ')';
   }
   if (HasBaseReg && BaseRegs.empty()) {
     if (!First) OS << " + "; else First = false;
@@ -422,7 +495,7 @@ void Formula::print(raw_ostream &OS) const {
     OS << ')';
   }
   if (UnfoldedOffset != 0) {
-    if (!First) OS << " + "; else First = false;
+    if (!First) OS << " + ";
     OS << "imm(" << UnfoldedOffset << ')';
   }
 }
@@ -433,24 +506,24 @@ void Formula::dump() const {
 }
 #endif
 
-/// isAddRecSExtable - Return true if the given addrec can be sign-extended
-/// without changing its value.
+/// Return true if the given addrec can be sign-extended without changing its
+/// value.
 static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
   Type *WideTy =
     IntegerType::get(SE.getContext(),
                      SE.getTypeSizeInBits(AR->getType()) + 1);
   return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
 }
 
-/// isAddSExtable - Return true if the given add can be sign-extended
-/// without changing its value.
+/// Return true if the given add can be sign-extended without changing its
+/// value.
 static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
   Type *WideTy =
     IntegerType::get(SE.getContext(),
                      SE.getTypeSizeInBits(A->getType()) + 1);
   return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
 }
 
-/// isMulSExtable - Return true if the given mul can be sign-extended
-/// without changing its value.
+/// Return true if the given mul can be sign-extended without changing its
+/// value.
 static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
   Type *WideTy =
     IntegerType::get(SE.getContext(),
@@ -458,12 +531,11 @@ static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
   return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
 }
 
-/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
-/// and if the remainder is known to be zero, or null otherwise. If
-/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
-/// to Y, ignoring that the multiplication may overflow, which is useful when
-/// the result will be used in a context where the most significant bits are
-/// ignored.
+/// Return an expression for LHS /s RHS, if it can be determined and if the
+/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
+/// is true, expressions like (X * Y) /s Y are simplified to Y, ignoring that
+/// the multiplication may overflow, which is useful when the result will be
+/// used in a context where the most significant bits are ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE, bool IgnoreSignificantBits = false) { @@ -487,11 +559,11 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, // Check for a division of a constant by a constant. if (const SCEVConstant *C = dyn_cast(LHS)) { if (!RC) - return 0; + return nullptr; const APInt &LA = C->getValue()->getValue(); const APInt &RA = RC->getValue()->getValue(); if (LA.srem(RA) != 0) - return 0; + return nullptr; return SE.getConstant(LA.sdiv(RA)); } @@ -500,32 +572,30 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) { const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, IgnoreSignificantBits); - if (!Step) return 0; + if (!Step) return nullptr; const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, IgnoreSignificantBits); - if (!Start) return 0; + if (!Start) return nullptr; // FlagNW is independent of the start value, step direction, and is // preserved with smaller magnitude steps. // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); } - return 0; + return nullptr; } // Distribute the sdiv over add operands, if the add doesn't overflow. if (const SCEVAddExpr *Add = dyn_cast(LHS)) { if (IgnoreSignificantBits || isAddSExtable(Add, SE)) { SmallVector Ops; - for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); - I != E; ++I) { - const SCEV *Op = getExactSDiv(*I, RHS, SE, - IgnoreSignificantBits); - if (!Op) return 0; + for (const SCEV *S : Add->operands()) { + const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits); + if (!Op) return nullptr; Ops.push_back(Op); } return SE.getAddExpr(Ops); } - return 0; + return nullptr; } // Check for a multiply operand that we can pull RHS out of. @@ -533,9 +603,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) { SmallVector Ops; bool Found = false; - for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end(); - I != E; ++I) { - const SCEV *S = *I; + for (const SCEV *S : Mul->operands()) { if (!Found) if (const SCEV *Q = getExactSDiv(S, RHS, SE, IgnoreSignificantBits)) { @@ -544,18 +612,17 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, } Ops.push_back(S); } - return Found ? SE.getMulExpr(Ops) : 0; + return Found ? SE.getMulExpr(Ops) : nullptr; } - return 0; + return nullptr; } // Otherwise we don't know. - return 0; + return nullptr; } -/// ExtractImmediate - If S involves the addition of a constant integer value, -/// return that integer value, and mutate S to point to a new SCEV with that -/// value excluded. +/// If S involves the addition of a constant integer value, return that integer +/// value, and mutate S to point to a new SCEV with that value excluded. static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { if (const SCEVConstant *C = dyn_cast(S)) { if (C->getValue()->getValue().getMinSignedBits() <= 64) { @@ -580,9 +647,8 @@ static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { return 0; } -/// ExtractSymbol - If S involves the addition of a GlobalValue address, -/// return that symbol, and mutate S to point to a new SCEV with that -/// value excluded. +/// If S involves the addition of a GlobalValue address, return that symbol, and +/// mutate S to point to a new SCEV with that value excluded. 
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { if (const SCEVUnknown *U = dyn_cast(S)) { if (GlobalValue *GV = dyn_cast(U->getValue())) { @@ -604,11 +670,11 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { SCEV::FlagAnyWrap); return Result; } - return 0; + return nullptr; } -/// isAddressUse - Returns true if the specified instruction is using the -/// specified value as an address. +/// Returns true if the specified instruction is using the specified value as an +/// address. static bool isAddressUse(Instruction *Inst, Value *OperandVal) { bool isAddress = isa(Inst); if (StoreInst *SI = dyn_cast(Inst)) { @@ -632,12 +698,15 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) { return isAddress; } -/// getAccessType - Return the type of the memory being accessed. -static Type *getAccessType(const Instruction *Inst) { - Type *AccessTy = Inst->getType(); - if (const StoreInst *SI = dyn_cast(Inst)) - AccessTy = SI->getOperand(0)->getType(); - else if (const IntrinsicInst *II = dyn_cast(Inst)) { +/// Return the type of the memory being accessed. +static MemAccessTy getAccessType(const Instruction *Inst) { + MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace); + if (const StoreInst *SI = dyn_cast(Inst)) { + AccessTy.MemTy = SI->getOperand(0)->getType(); + AccessTy.AddrSpace = SI->getPointerAddressSpace(); + } else if (const LoadInst *LI = dyn_cast(Inst)) { + AccessTy.AddrSpace = LI->getPointerAddressSpace(); + } else if (const IntrinsicInst *II = dyn_cast(Inst)) { // Addressing modes can also be folded into prefetches and a variety // of intrinsics. switch (II->getIntrinsicID()) { @@ -646,21 +715,21 @@ static Type *getAccessType(const Instruction *Inst) { case Intrinsic::x86_sse2_storeu_pd: case Intrinsic::x86_sse2_storeu_dq: case Intrinsic::x86_sse2_storel_dq: - AccessTy = II->getArgOperand(0)->getType(); + AccessTy.MemTy = II->getArgOperand(0)->getType(); break; } } // All pointers have the same requirements, so canonicalize them to an // arbitrary pointer type to minimize variation. - if (PointerType *PTy = dyn_cast(AccessTy)) - AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), - PTy->getAddressSpace()); + if (PointerType *PTy = dyn_cast(AccessTy.MemTy)) + AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), + PTy->getAddressSpace()); return AccessTy; } -/// isExistingPhi - Return true if this AddRec is already a phi in its loop. +/// Return true if this AddRec is already a phi in its loop. static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); PHINode *PN = dyn_cast(I); ++I) { @@ -683,7 +752,7 @@ static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { /// TODO: Allow UDivExpr if we can find an existing IV increment that is an /// obvious multiple of the UDivExpr. 
static bool isHighCostExpansion(const SCEV *S, - SmallPtrSet &Processed, + SmallPtrSetImpl &Processed, ScalarEvolution &SE) { // Zero/One operand expressions switch (S->getSCEVType()) { @@ -701,13 +770,12 @@ static bool isHighCostExpansion(const SCEV *S, Processed, SE); } - if (!Processed.insert(S)) + if (!Processed.insert(S).second) return false; if (const SCEVAddExpr *Add = dyn_cast(S)) { - for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); - I != E; ++I) { - if (isHighCostExpansion(*I, Processed, SE)) + for (const SCEV *S : Add->operands()) { + if (isHighCostExpansion(S, Processed, SE)) return true; } return false; @@ -723,13 +791,12 @@ static bool isHighCostExpansion(const SCEV *S, // multiplication already generates this expression. if (const SCEVUnknown *U = dyn_cast(Mul->getOperand(1))) { Value *UVal = U->getValue(); - for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end(); - UI != UE; ++UI) { + for (User *UR : UVal->users()) { // If U is a constant, it may be used by a ConstantExpr. - Instruction *User = dyn_cast(*UI); - if (User && User->getOpcode() == Instruction::Mul - && SE.isSCEVable(User->getType())) { - return SE.getSCEV(User) == Mul; + Instruction *UI = dyn_cast(UR); + if (UI && UI->getOpcode() == Instruction::Mul && + SE.isSCEVable(UI->getType())) { + return SE.getSCEV(UI) == Mul; } } } @@ -745,9 +812,8 @@ static bool isHighCostExpansion(const SCEV *S, return true; } -/// DeleteTriviallyDeadInstructions - If any of the instructions is the -/// specified set are trivially dead, delete them and see if this makes any of -/// their operands subsequently dead. +/// If any of the instructions is the specified set are trivially dead, delete +/// them and see if this makes any of their operands subsequently dead. static bool DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { bool Changed = false; @@ -756,14 +822,14 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { Value *V = DeadInsts.pop_back_val(); Instruction *I = dyn_cast_or_null(V); - if (I == 0 || !isInstructionTriviallyDead(I)) + if (!I || !isInstructionTriviallyDead(I)) continue; - for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) - if (Instruction *U = dyn_cast(*OI)) { - *OI = 0; + for (Use &O : I->operands()) + if (Instruction *U = dyn_cast(O)) { + O = nullptr; if (U->use_empty()) - DeadInsts.push_back(U); + DeadInsts.emplace_back(U); } I->eraseFromParent(); @@ -774,8 +840,27 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { } namespace { +class LSRUse; +} + +/// \brief Check if the addressing mode defined by \p F is completely +/// folded in \p LU at isel time. +/// This includes address-mode folding and special icmp tricks. +/// This function returns true if \p LU can accommodate what \p F +/// defines and up to 1 base + 1 scaled + offset. +/// In other words, if \p F has several base registers, this function may +/// still return true. Therefore, users still need to account for +/// additional base registers and/or unfolded offsets to derive an +/// accurate cost model. +static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F); +// Get the cost of the scaling factor used in F for LU. +static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F); -/// Cost - This class is used to measure and compare candidate formulae. +namespace { + +/// This class is used to measure and compare candidate formulae. 
class Cost { /// TODO: Some of these could be merged. Also, a lexical ordering /// isn't always optimal. @@ -785,23 +870,24 @@ class Cost { unsigned NumBaseAdds; unsigned ImmCost; unsigned SetupCost; + unsigned ScaleCost; public: Cost() : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0), - SetupCost(0) {} + SetupCost(0), ScaleCost(0) {} bool operator<(const Cost &Other) const; - void Loose(); + void Lose(); #ifndef NDEBUG // Once any of the metrics loses, they must all remain losers. bool isValid() { return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds - | ImmCost | SetupCost) != ~0u) + | ImmCost | SetupCost | ScaleCost) != ~0u) || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds - & ImmCost & SetupCost) == ~0u); + & ImmCost & SetupCost & ScaleCost) == ~0u); } #endif @@ -810,34 +896,36 @@ public: return NumRegs == ~0u; } - void RateFormula(const Formula &F, - SmallPtrSet &Regs, + void RateFormula(const TargetTransformInfo &TTI, + const Formula &F, + SmallPtrSetImpl &Regs, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, ScalarEvolution &SE, DominatorTree &DT, - SmallPtrSet *LoserRegs = 0); + const LSRUse &LU, + SmallPtrSetImpl *LoserRegs = nullptr); void print(raw_ostream &OS) const; void dump() const; private: void RateRegister(const SCEV *Reg, - SmallPtrSet &Regs, + SmallPtrSetImpl &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT); void RatePrimaryRegister(const SCEV *Reg, - SmallPtrSet &Regs, + SmallPtrSetImpl &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT, - SmallPtrSet *LoserRegs); + SmallPtrSetImpl *LoserRegs); }; } -/// RateRegister - Tally up interesting quantities from the given register. +/// Tally up interesting quantities from the given register. void Cost::RateRegister(const SCEV *Reg, - SmallPtrSet &Regs, + SmallPtrSetImpl &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT) { if (const SCEVAddRecExpr *AR = dyn_cast(Reg)) { @@ -851,7 +939,7 @@ void Cost::RateRegister(const SCEV *Reg, return; // Otherwise, do not consider this formula at all. - Loose(); + Lose(); return; } AddRecCost += 1; /// TODO: This should be a function of the stride. @@ -881,47 +969,48 @@ void Cost::RateRegister(const SCEV *Reg, SE.hasComputableLoopEvolution(Reg, L); } -/// RatePrimaryRegister - Record this register in the set. If we haven't seen it -/// before, rate it. Optional LoserRegs provides a way to declare any formula -/// that refers to one of those regs an instant loser. +/// Record this register in the set. If we haven't seen it before, rate +/// it. Optional LoserRegs provides a way to declare any formula that refers to +/// one of those regs an instant loser. 
void Cost::RatePrimaryRegister(const SCEV *Reg, - SmallPtrSet &Regs, + SmallPtrSetImpl &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT, - SmallPtrSet *LoserRegs) { + SmallPtrSetImpl *LoserRegs) { if (LoserRegs && LoserRegs->count(Reg)) { - Loose(); + Lose(); return; } - if (Regs.insert(Reg)) { + if (Regs.insert(Reg).second) { RateRegister(Reg, Regs, L, SE, DT); if (LoserRegs && isLoser()) LoserRegs->insert(Reg); } } -void Cost::RateFormula(const Formula &F, - SmallPtrSet &Regs, +void Cost::RateFormula(const TargetTransformInfo &TTI, + const Formula &F, + SmallPtrSetImpl &Regs, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, ScalarEvolution &SE, DominatorTree &DT, - SmallPtrSet *LoserRegs) { + const LSRUse &LU, + SmallPtrSetImpl *LoserRegs) { + assert(F.isCanonical() && "Cost is accurate only for canonical formula"); // Tally up the registers. if (const SCEV *ScaledReg = F.ScaledReg) { if (VisitedRegs.count(ScaledReg)) { - Loose(); + Lose(); return; } RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs); if (isLoser()) return; } - for (SmallVectorImpl::const_iterator I = F.BaseRegs.begin(), - E = F.BaseRegs.end(); I != E; ++I) { - const SCEV *BaseReg = *I; + for (const SCEV *BaseReg : F.BaseRegs) { if (VisitedRegs.count(BaseReg)) { - Loose(); + Lose(); return; } RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs); @@ -930,14 +1019,20 @@ void Cost::RateFormula(const Formula &F, } // Determine how many (unfolded) adds we'll need inside the loop. - size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0); + size_t NumBaseParts = F.getNumRegs(); if (NumBaseParts > 1) - NumBaseAdds += NumBaseParts - 1; + // Do not count the base and a possible second register if the target + // allows to fold 2 registers. + NumBaseAdds += + NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(TTI, LU, F))); + NumBaseAdds += (F.UnfoldedOffset != 0); + + // Accumulate non-free scaling amounts. + ScaleCost += getScalingFactorCost(TTI, LU, F); // Tally up the non-zero immediates. - for (SmallVectorImpl::const_iterator I = Offsets.begin(), - E = Offsets.end(); I != E; ++I) { - int64_t Offset = (uint64_t)*I + F.BaseOffset; + for (int64_t O : Offsets) { + int64_t Offset = (uint64_t)O + F.BaseOffset; if (F.BaseGV) ImmCost += 64; // Handle symbolic values conservatively. // TODO: This should probably be the pointer size. @@ -947,31 +1042,24 @@ void Cost::RateFormula(const Formula &F, assert(isValid() && "invalid cost"); } -/// Loose - Set this cost to a losing value. -void Cost::Loose() { +/// Set this cost to a losing value. +void Cost::Lose() { NumRegs = ~0u; AddRecCost = ~0u; NumIVMuls = ~0u; NumBaseAdds = ~0u; ImmCost = ~0u; SetupCost = ~0u; + ScaleCost = ~0u; } -/// operator< - Choose the lower cost. +/// Choose the lower cost. 
bool Cost::operator<(const Cost &Other) const { - if (NumRegs != Other.NumRegs) - return NumRegs < Other.NumRegs; - if (AddRecCost != Other.AddRecCost) - return AddRecCost < Other.AddRecCost; - if (NumIVMuls != Other.NumIVMuls) - return NumIVMuls < Other.NumIVMuls; - if (NumBaseAdds != Other.NumBaseAdds) - return NumBaseAdds < Other.NumBaseAdds; - if (ImmCost != Other.ImmCost) - return ImmCost < Other.ImmCost; - if (SetupCost != Other.SetupCost) - return SetupCost < Other.SetupCost; - return false; + return std::tie(NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost, + ImmCost, SetupCost) < + std::tie(Other.NumRegs, Other.AddRecCost, Other.NumIVMuls, + Other.NumBaseAdds, Other.ScaleCost, Other.ImmCost, + Other.SetupCost); } void Cost::print(raw_ostream &OS) const { @@ -983,6 +1071,8 @@ void Cost::print(raw_ostream &OS) const { if (NumBaseAdds != 0) OS << ", plus " << NumBaseAdds << " base add" << (NumBaseAdds == 1 ? "" : "s"); + if (ScaleCost != 0) + OS << ", plus " << ScaleCost << " scale cost"; if (ImmCost != 0) OS << ", plus " << ImmCost << " imm cost"; if (SetupCost != 0) @@ -997,29 +1087,28 @@ void Cost::dump() const { namespace { -/// LSRFixup - An operand value in an instruction which is to be replaced -/// with some equivalent, possibly strength-reduced, replacement. +/// An operand value in an instruction which is to be replaced with some +/// equivalent, possibly strength-reduced, replacement. struct LSRFixup { - /// UserInst - The instruction which will be updated. + /// The instruction which will be updated. Instruction *UserInst; - /// OperandValToReplace - The operand of the instruction which will - /// be replaced. The operand may be used more than once; every instance - /// will be replaced. + /// The operand of the instruction which will be replaced. The operand may be + /// used more than once; every instance will be replaced. Value *OperandValToReplace; - /// PostIncLoops - If this user is to use the post-incremented value of an - /// induction variable, this variable is non-null and holds the loop - /// associated with the induction variable. + /// If this user is to use the post-incremented value of an induction + /// variable, this variable is non-null and holds the loop associated with the + /// induction variable. PostIncLoopSet PostIncLoops; - /// LUIdx - The index of the LSRUse describing the expression which - /// this fixup needs, minus an offset (below). + /// The index of the LSRUse describing the expression which this fixup needs, + /// minus an offset (below). size_t LUIdx; - /// Offset - A constant offset to be added to the LSRUse expression. - /// This allows multiple fixups to share the same LSRUse with different - /// offsets, for example in an unrolled loop. + /// A constant offset to be added to the LSRUse expression. This allows + /// multiple fixups to share the same LSRUse with different offsets, for + /// example in an unrolled loop. int64_t Offset; bool isUseFullyOutsideLoop(const Loop *L) const; @@ -1033,10 +1122,10 @@ struct LSRFixup { } LSRFixup::LSRFixup() - : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} + : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)), + Offset(0) {} -/// isUseFullyOutsideLoop - Test whether this fixup always uses its -/// value outside of the given loop. +/// Test whether this fixup always uses its value outside of the given loop. bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { // PHI nodes use their value in their incoming blocks. 
if (const PHINode *PN = dyn_cast(UserInst)) { @@ -1055,19 +1144,18 @@ void LSRFixup::print(raw_ostream &OS) const { // Store is common and interesting enough to be worth special-casing. if (StoreInst *Store = dyn_cast(UserInst)) { OS << "store "; - WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); + Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); } else if (UserInst->getType()->isVoidTy()) OS << UserInst->getOpcodeName(); else - WriteAsOperand(OS, UserInst, /*PrintType=*/false); + UserInst->printAsOperand(OS, /*PrintType=*/false); OS << ", OperandValToReplace="; - WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); + OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); - for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(), - E = PostIncLoops.end(); I != E; ++I) { + for (const Loop *PIL : PostIncLoops) { OS << ", PostIncLoop="; - WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false); + PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false); } if (LUIdx != ~size_t(0)) @@ -1085,8 +1173,8 @@ void LSRFixup::dump() const { namespace { -/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding -/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. +/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted +/// SmallVectors of const SCEV*. struct UniquifierDenseMapInfo { static SmallVector getEmptyKey() { SmallVector V; @@ -1101,11 +1189,7 @@ struct UniquifierDenseMapInfo { } static unsigned getHashValue(const SmallVector &V) { - unsigned Result = 0; - for (SmallVectorImpl::const_iterator I = V.begin(), - E = V.end(); I != E; ++I) - Result ^= DenseMapInfo::getHashValue(*I); - return Result; + return static_cast(hash_combine_range(V.begin(), V.end())); } static bool isEqual(const SmallVector &LHS, @@ -1114,17 +1198,17 @@ struct UniquifierDenseMapInfo { } }; -/// LSRUse - This class holds the state that LSR keeps for each use in -/// IVUsers, as well as uses invented by LSR itself. It includes information -/// about what kinds of things can be folded into the user, information about -/// the user itself, and information about how the use may be satisfied. -/// TODO: Represent multiple users of the same expression in common? +/// This class holds the state that LSR keeps for each use in IVUsers, as well +/// as uses invented by LSR itself. It includes information about what kinds of +/// things can be folded into the user, information about the user itself, and +/// information about how the use may be satisfied. TODO: Represent multiple +/// users of the same expression in common? class LSRUse { DenseSet, UniquifierDenseMapInfo> Uniquifier; public: - /// KindType - An enum for a kind of use, indicating what types of - /// scaled and immediate operands it might support. + /// An enum for a kind of use, indicating what types of scaled and immediate + /// operands it might support. enum KindType { Basic, ///< A normal use, with no folding. Special, ///< A special case of basic, allowing -1 scales. @@ -1133,37 +1217,44 @@ public: // TODO: Add a generic icmp too? }; + typedef PointerIntPair SCEVUseKindPair; + KindType Kind; - Type *AccessTy; + MemAccessTy AccessTy; SmallVector Offsets; int64_t MinOffset; int64_t MaxOffset; - /// AllFixupsOutsideLoop - This records whether all of the fixups using this - /// LSRUse are outside of the loop, in which case some special-case heuristics - /// may be used. 
+ /// This records whether all of the fixups using this LSRUse are outside of + /// the loop, in which case some special-case heuristics may be used. bool AllFixupsOutsideLoop; - /// WidestFixupType - This records the widest use type for any fixup using - /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different - /// max fixup widths to be equivalent, because the narrower one may be relying - /// on the implicit truncation to truncate away bogus bits. + /// RigidFormula is set to true to guarantee that this use will be associated + /// with a single formula--the one that initially matched. Some SCEV + /// expressions cannot be expanded. This allows LSR to consider the registers + /// used by those expressions without the need to expand them later after + /// changing the formula. + bool RigidFormula; + + /// This records the widest use type for any fixup using this + /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max + /// fixup widths to be equivalent, because the narrower one may be relying on + /// the implicit truncation to truncate away bogus bits. Type *WidestFixupType; - /// Formulae - A list of ways to build a value that can satisfy this user. - /// After the list is populated, one of these is selected heuristically and - /// used to formulate a replacement for OperandValToReplace in UserInst. + /// A list of ways to build a value that can satisfy this user. After the + /// list is populated, one of these is selected heuristically and used to + /// formulate a replacement for OperandValToReplace in UserInst. SmallVector Formulae; - /// Regs - The set of register candidates used by all formulae in this LSRUse. + /// The set of register candidates used by all formulae in this LSRUse. SmallPtrSet Regs; - LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T), - MinOffset(INT64_MAX), - MaxOffset(INT64_MIN), - AllFixupsOutsideLoop(true), - WidestFixupType(0) {} + LSRUse(KindType K, MemAccessTy AT) + : Kind(K), AccessTy(AT), MinOffset(INT64_MAX), MaxOffset(INT64_MIN), + AllFixupsOutsideLoop(true), RigidFormula(false), + WidestFixupType(nullptr) {} bool HasFormulaWithSameRegs(const Formula &F) const; bool InsertFormula(const Formula &F); @@ -1176,8 +1267,8 @@ public: } -/// HasFormula - Test whether this use as a formula which has the same -/// registers as the given formula. +/// Test whether this use as a formula which has the same registers as the given +/// formula. bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { SmallVector Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); @@ -1186,9 +1277,14 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { return Uniquifier.count(Key); } -/// InsertFormula - If the given formula has not yet been inserted, add it to -/// the list, and return true. Return false otherwise. +/// If the given formula has not yet been inserted, add it to the list, and +/// return true. Return false otherwise. The formula must be in canonical form. bool LSRUse::InsertFormula(const Formula &F) { + assert(F.isCanonical() && "Invalid canonical representation"); + + if (!Formulae.empty() && RigidFormula) + return false; + SmallVector Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. 
@@ -1201,9 +1297,8 @@ bool LSRUse::InsertFormula(const Formula &F) { assert((!F.ScaledReg || !F.ScaledReg->isZero()) && "Zero allocated in a scaled register!"); #ifndef NDEBUG - for (SmallVectorImpl::const_iterator I = - F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) - assert(!(*I)->isZero() && "Zero allocated in a base register!"); + for (const SCEV *BaseReg : F.BaseRegs) + assert(!BaseReg->isZero() && "Zero allocated in a base register!"); #endif // Add the formula to the list. @@ -1211,34 +1306,33 @@ bool LSRUse::InsertFormula(const Formula &F) { // Record registers now being used by this use. Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); + if (F.ScaledReg) + Regs.insert(F.ScaledReg); return true; } -/// DeleteFormula - Remove the given formula from this use's list. +/// Remove the given formula from this use's list. void LSRUse::DeleteFormula(Formula &F) { if (&F != &Formulae.back()) std::swap(F, Formulae.back()); Formulae.pop_back(); } -/// RecomputeRegs - Recompute the Regs field, and update RegUses. +/// Recompute the Regs field, and update RegUses. void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) { // Now that we've filtered out some formulae, recompute the Regs set. - SmallPtrSet OldRegs = Regs; + SmallPtrSet OldRegs = std::move(Regs); Regs.clear(); - for (SmallVectorImpl::const_iterator I = Formulae.begin(), - E = Formulae.end(); I != E; ++I) { - const Formula &F = *I; + for (const Formula &F : Formulae) { if (F.ScaledReg) Regs.insert(F.ScaledReg); Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); } // Update the RegTracker. - for (SmallPtrSet::iterator I = OldRegs.begin(), - E = OldRegs.end(); I != E; ++I) - if (!Regs.count(*I)) - RegUses.DropRegister(*I, LUIdx); + for (const SCEV *S : OldRegs) + if (!Regs.count(S)) + RegUses.dropRegister(S, LUIdx); } void LSRUse::print(raw_ostream &OS) const { @@ -1249,18 +1343,21 @@ void LSRUse::print(raw_ostream &OS) const { case ICmpZero: OS << "ICmpZero"; break; case Address: OS << "Address of "; - if (AccessTy->isPointerTy()) + if (AccessTy.MemTy->isPointerTy()) OS << "pointer"; // the full pointer type could be really verbose - else - OS << *AccessTy; + else { + OS << *AccessTy.MemTy; + } + + OS << " in addrspace(" << AccessTy.AddrSpace << ')'; } OS << ", Offsets={"; - for (SmallVectorImpl::const_iterator I = Offsets.begin(), - E = Offsets.end(); I != E; ++I) { - OS << *I; - if (llvm::next(I) != E) - OS << ','; + bool NeedComma = false; + for (int64_t O : Offsets) { + if (NeedComma) OS << ','; + OS << O; + NeedComma = true; } OS << '}'; @@ -1277,18 +1374,14 @@ void LSRUse::dump() const { } #endif -/// isLegalUse - Test whether the use described by AM is "legal", meaning it can -/// be completely folded into the user instruction at isel time. This includes -/// address-mode folding and special icmp tricks. -static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind, - Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale) { +static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, + LSRUse::KindType Kind, MemAccessTy AccessTy, + GlobalValue *BaseGV, int64_t BaseOffset, + bool HasBaseReg, int64_t Scale) { switch (Kind) { case LSRUse::Address: - return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); - - // Otherwise, just guess that reg+reg addressing is legal. 
- //return ; + return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset, + HasBaseReg, Scale, AccessTy.AddrSpace); case LSRUse::ICmpZero: // There's not even a target hook for querying whether it would be legal to @@ -1333,10 +1426,11 @@ static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind, llvm_unreachable("Invalid LSRUse Kind!"); } -static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, - int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, - GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, - int64_t Scale) { +static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, + int64_t MinOffset, int64_t MaxOffset, + LSRUse::KindType Kind, MemAccessTy AccessTy, + GlobalValue *BaseGV, int64_t BaseOffset, + bool HasBaseReg, int64_t Scale) { // Check for overflow. if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) != (MinOffset > 0)) @@ -1347,20 +1441,95 @@ static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, return false; MaxOffset = (uint64_t)BaseOffset + MaxOffset; - return isLegalUse(TTI, Kind, AccessTy, BaseGV, MinOffset, HasBaseReg, - Scale) && - isLegalUse(TTI, Kind, AccessTy, BaseGV, MaxOffset, HasBaseReg, Scale); + return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset, + HasBaseReg, Scale) && + isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset, + HasBaseReg, Scale); +} + +static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, + int64_t MinOffset, int64_t MaxOffset, + LSRUse::KindType Kind, MemAccessTy AccessTy, + const Formula &F) { + // For the purpose of isAMCompletelyFolded either having a canonical formula + // or a scale not equal to zero is correct. + // Problems may arise from non canonical formulae having a scale == 0. + // Strictly speaking it would best to just rely on canonical formulae. + // However, when we generate the scaled formulae, we first check that the + // scaling factor is profitable before computing the actual ScaledReg for + // compile time sake. + assert((F.isCanonical() || F.Scale != 0)); + return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, + F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); +} + +/// Test whether we know how to expand the current formula. +static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, + int64_t MaxOffset, LSRUse::KindType Kind, + MemAccessTy AccessTy, GlobalValue *BaseGV, + int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { + // We know how to expand completely foldable formulae. + return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, + BaseOffset, HasBaseReg, Scale) || + // Or formulae that use a base register produced by a sum of base + // registers. 
+ (Scale == 1 && + isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, + BaseGV, BaseOffset, true, 0)); } static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, - int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, - const Formula &F) { + int64_t MaxOffset, LSRUse::KindType Kind, + MemAccessTy AccessTy, const Formula &F) { return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); } +static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F) { + return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, + LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, + F.Scale); +} + +static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F) { + if (!F.Scale) + return 0; + + // If the use is not completely folded in that instruction, we will have to + // pay an extra cost only for scale != 1. + if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, + LU.AccessTy, F)) + return F.Scale != 1; + + switch (LU.Kind) { + case LSRUse::Address: { + // Check the scaling factor cost with both the min and max offsets. + int ScaleCostMinOffset = TTI.getScalingFactorCost( + LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, + F.Scale, LU.AccessTy.AddrSpace); + int ScaleCostMaxOffset = TTI.getScalingFactorCost( + LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, + F.Scale, LU.AccessTy.AddrSpace); + + assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && + "Legal addressing mode has an illegal cost!"); + return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); + } + case LSRUse::ICmpZero: + case LSRUse::Basic: + case LSRUse::Special: + // The use is completely folded, i.e., everything is folded into the + // instruction. + return 0; + } + + llvm_unreachable("Invalid LSRUse Kind!"); +} + static bool isAlwaysFoldable(const TargetTransformInfo &TTI, - LSRUse::KindType Kind, Type *AccessTy, + LSRUse::KindType Kind, MemAccessTy AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg) { // Fast-path: zero is always foldable. @@ -1377,13 +1546,15 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI, HasBaseReg = true; } - return isLegalUse(TTI, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); + return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset, + HasBaseReg, Scale); } static bool isAlwaysFoldable(const TargetTransformInfo &TTI, ScalarEvolution &SE, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, - Type *AccessTy, const SCEV *S, bool HasBaseReg) { + MemAccessTy AccessTy, const SCEV *S, + bool HasBaseReg) { // Fast-path: zero is always foldable. if (S->isZero()) return true; @@ -1402,39 +1573,15 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI, // base and a scale. int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; - return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, - BaseOffset, HasBaseReg, Scale); + return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, + BaseOffset, HasBaseReg, Scale); } namespace { -/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding -/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind. 
-struct UseMapDenseMapInfo { - static std::pair getEmptyKey() { - return std::make_pair(reinterpret_cast(-1), LSRUse::Basic); - } - - static std::pair getTombstoneKey() { - return std::make_pair(reinterpret_cast(-2), LSRUse::Basic); - } - - static unsigned - getHashValue(const std::pair &V) { - unsigned Result = DenseMapInfo::getHashValue(V.first); - Result ^= DenseMapInfo::getHashValue(unsigned(V.second)); - return Result; - } - - static bool isEqual(const std::pair &LHS, - const std::pair &RHS) { - return LHS == RHS; - } -}; - -/// IVInc - An individual increment in a Chain of IV increments. -/// Relate an IV user to an expression that computes the IV it uses from the IV -/// used by the previous link in the Chain. +/// An individual increment in a Chain of IV increments. Relate an IV user to +/// an expression that computes the IV it uses from the IV used by the previous +/// link in the Chain. /// /// For the head of a chain, IncExpr holds the absolute SCEV expression for the /// original IVOperand. The head of the chain's IVOperand is only valid during @@ -1450,54 +1597,52 @@ struct IVInc { UserInst(U), IVOperand(O), IncExpr(E) {} }; -// IVChain - The list of IV increments in program order. -// We typically add the head of a chain without finding subsequent links. +// The list of IV increments in program order. We typically add the head of a +// chain without finding subsequent links. struct IVChain { SmallVector Incs; const SCEV *ExprBase; - IVChain() : ExprBase(0) {} + IVChain() : ExprBase(nullptr) {} IVChain(const IVInc &Head, const SCEV *Base) : Incs(1, Head), ExprBase(Base) {} typedef SmallVectorImpl::const_iterator const_iterator; - // begin - return the first increment in the chain. + // Return the first increment in the chain. const_iterator begin() const { assert(!Incs.empty()); - return llvm::next(Incs.begin()); + return std::next(Incs.begin()); } const_iterator end() const { return Incs.end(); } - // hasIncs - Returns true if this chain contains any increments. + // Returns true if this chain contains any increments. bool hasIncs() const { return Incs.size() >= 2; } - // add - Add an IVInc to the end of this chain. + // Add an IVInc to the end of this chain. void add(const IVInc &X) { Incs.push_back(X); } - // tailUserInst - Returns the last UserInst in the chain. + // Returns the last UserInst in the chain. Instruction *tailUserInst() const { return Incs.back().UserInst; } - // isProfitableIncrement - Returns true if IncExpr can be profitably added to - // this chain. + // Returns true if IncExpr can be profitably added to this chain. bool isProfitableIncrement(const SCEV *OperExpr, const SCEV *IncExpr, ScalarEvolution&); }; -/// ChainUsers - Helper for CollectChains to track multiple IV increment uses. -/// Distinguish between FarUsers that definitely cross IV increments and -/// NearUsers that may be used between IV increments. +/// Helper for CollectChains to track multiple IV increment uses. Distinguish +/// between FarUsers that definitely cross IV increments and NearUsers that may +/// be used between IV increments. struct ChainUsers { SmallPtrSet FarUsers; SmallPtrSet NearUsers; }; -/// LSRInstance - This class holds state for the main loop strength reduction -/// logic. +/// This class holds state for the main loop strength reduction logic. 
class LSRInstance { IVUsers &IU; ScalarEvolution &SE; @@ -1507,25 +1652,25 @@ class LSRInstance { Loop *const L; bool Changed; - /// IVIncInsertPos - This is the insert position that the current loop's - /// induction variable increment should be placed. In simple loops, this is - /// the latch block's terminator. But in more complicated cases, this is a - /// position which will dominate all the in-loop post-increment users. + /// This is the insert position that the current loop's induction variable + /// increment should be placed. In simple loops, this is the latch block's + /// terminator. But in more complicated cases, this is a position which will + /// dominate all the in-loop post-increment users. Instruction *IVIncInsertPos; - /// Factors - Interesting factors between use strides. + /// Interesting factors between use strides. SmallSetVector Factors; - /// Types - Interesting use types, to facilitate truncation reuse. + /// Interesting use types, to facilitate truncation reuse. SmallSetVector Types; - /// Fixups - The list of operands which are to be replaced. + /// The list of operands which are to be replaced. SmallVector Fixups; - /// Uses - The list of interesting uses. + /// The list of interesting uses. SmallVector Uses; - /// RegUses - Track which uses use which register candidates. + /// Track which uses use which register candidates. RegUseTracker RegUses; // Limit the number of chains to avoid quadratic behavior. We don't expect to @@ -1533,10 +1678,10 @@ class LSRInstance { // back to normal LSR behavior for those uses. static const unsigned MaxChains = 8; - /// IVChainVec - IV users can form a chain of IV increments. + /// IV users can form a chain of IV increments. SmallVector IVChainVec; - /// IVIncSet - IV users that belong to profitable IVChains. + /// IV users that belong to profitable IVChains. SmallPtrSet IVIncSet; void OptimizeShadowIV(); @@ -1560,17 +1705,14 @@ class LSRInstance { } // Support for sharing of LSRUses between LSRFixups. - typedef DenseMap, - size_t, - UseMapDenseMapInfo> UseMapTy; + typedef DenseMap UseMapTy; UseMapTy UseMap; bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, - LSRUse::KindType Kind, Type *AccessTy); + LSRUse::KindType Kind, MemAccessTy AccessTy); - std::pair getUse(const SCEV *&Expr, - LSRUse::KindType Kind, - Type *AccessTy); + std::pair getUse(const SCEV *&Expr, LSRUse::KindType Kind, + MemAccessTy AccessTy); void DeleteUse(LSRUse &LU, size_t LUIdx); @@ -1585,8 +1727,19 @@ class LSRInstance { void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, unsigned Depth = 0); + + void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, + const Formula &Base, unsigned Depth, + size_t Idx, bool IsScaledReg = false); void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base); + void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, + const Formula &Base, size_t Idx, + bool IsScaledReg = false); void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); + void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx, + const Formula &Base, + const SmallVectorImpl &Worklist, + size_t Idx, bool IsScaledReg = false); void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base); @@ -1652,8 +1805,8 @@ public: } -/// OptimizeShadowIV - If IV is used in a int-to-float cast -/// inside the loop then try to eliminate the cast operation. 
+/// If IV is used in an int-to-float cast inside the loop then try to eliminate
+/// the cast operation.
 void LSRInstance::OptimizeShadowIV() {
   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
@@ -1664,7 +1817,7 @@ void LSRInstance::OptimizeShadowIV() {
     IVUsers::const_iterator CandidateUI = UI;
     ++UI;
     Instruction *ShadowUse = CandidateUI->getUser();
-    Type *DestTy = NULL;
+    Type *DestTy = nullptr;
     bool IsSigned = false;

     /* If shadow use is an int->float cast then insert a second IV
@@ -1726,7 +1879,7 @@ void LSRInstance::OptimizeShadowIV() {
       continue;

     /* Initialize new IV, double d = 0.0 in above example. */
-    ConstantInt *C = NULL;
+    ConstantInt *C = nullptr;
     if (Incr->getOperand(0) == PH)
       C = dyn_cast<ConstantInt>(Incr->getOperand(1));
     else if (Incr->getOperand(1) == PH)
@@ -1761,23 +1914,21 @@ void LSRInstance::OptimizeShadowIV() {
   }
 }

-/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
-/// set the IV user and stride information and return true, otherwise return
-/// false.
+/// If Cond has an operand that is an expression of an IV, set the IV user and
+/// stride information and return true, otherwise return false.
 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
-  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
-    if (UI->getUser() == Cond) {
+  for (IVStrideUse &U : IU)
+    if (U.getUser() == Cond) {
       // NOTE: we could handle setcc instructions with multiple uses here, but
       // InstCombine does it as well for simple uses, it's not clear that it
       // occurs enough in real life to handle.
-      CondUse = UI;
+      CondUse = &U;
       return true;
     }
   return false;
 }

-/// OptimizeMax - Rewrite the loop's terminating condition if it uses
-/// a max computation.
+/// Rewrite the loop's terminating condition if it uses a max computation.
 ///
 /// This is a narrow solution to a specific, but acute, problem. For loops
 /// like this:
@@ -1848,7 +1999,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
   // for ICMP_ULE here because the comparison would be with zero, which
   // isn't interesting.
   CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
-  const SCEVNAryExpr *Max = 0;
+  const SCEVNAryExpr *Max = nullptr;
   if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
     Pred = ICmpInst::ICMP_SLE;
     Max = S;
@@ -1891,7 +2042,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {

   // Check the right operand of the select, and remember it, as it will
   // be used in the new comparison instruction.
-  Value *NewRHS = 0;
+  Value *NewRHS = nullptr;
   if (ICmpInst::isTrueWhenEqual(Pred)) {
     // Look for n+1, and grab n.
     if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
@@ -1935,8 +2086,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
   return NewCond;
 }

-/// OptimizeLoopTermCond - Change loop terminating condition to use the
-/// postinc iv when possible.
+/// Change loop terminating condition to use the postinc iv when possible.
 void
 LSRInstance::OptimizeLoopTermCond() {
   SmallPtrSet<Instruction *, 4> PostIncs;

@@ -1945,8 +2095,7 @@ LSRInstance::OptimizeLoopTermCond() {
   SmallVector<BasicBlock*, 8> ExitingBlocks;
   L->getExitingBlocks(ExitingBlocks);

-  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
-    BasicBlock *ExitingBlock = ExitingBlocks[i];
+  for (BasicBlock *ExitingBlock : ExitingBlocks) {
     // Get the terminating condition for the loop if possible.
If we // can, we want to change it to use a post-incremented version of its @@ -1961,7 +2110,7 @@ LSRInstance::OptimizeLoopTermCond() { continue; // Search IVUsesByStride to find Cond's IVUse if there is one. - IVStrideUse *CondUse = 0; + IVStrideUse *CondUse = nullptr; ICmpInst *Cond = cast(TermBr->getCondition()); if (!FindIVUserForCond(Cond, CondUse)) continue; @@ -2012,16 +2161,18 @@ LSRInstance::OptimizeLoopTermCond() { C->getValue().isMinSignedValue()) goto decline_post_inc; // Check for possible scaled-address reuse. - Type *AccessTy = getAccessType(UI->getUser()); + MemAccessTy AccessTy = getAccessType(UI->getUser()); int64_t Scale = C->getSExtValue(); - if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0, - /*BaseOffset=*/ 0, - /*HasBaseReg=*/ false, Scale)) + if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, + /*BaseOffset=*/0, + /*HasBaseReg=*/false, Scale, + AccessTy.AddrSpace)) goto decline_post_inc; Scale = -Scale; - if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0, - /*BaseOffset=*/ 0, - /*HasBaseReg=*/ false, Scale)) + if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, + /*BaseOffset=*/0, + /*HasBaseReg=*/false, Scale, + AccessTy.AddrSpace)) goto decline_post_inc; } } @@ -2040,7 +2191,7 @@ LSRInstance::OptimizeLoopTermCond() { ICmpInst *OldCond = Cond; Cond = cast(Cond->clone()); Cond->setName(L->getHeader()->getName() + ".termcond"); - ExitingBlock->getInstList().insert(TermBr, Cond); + ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond); // Clone the IVUse, as the old use still exists! CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); @@ -2062,50 +2213,52 @@ LSRInstance::OptimizeLoopTermCond() { // must dominate all the post-inc comparisons we just set up, and it must // dominate the loop latch edge. IVIncInsertPos = L->getLoopLatch()->getTerminator(); - for (SmallPtrSet::const_iterator I = PostIncs.begin(), - E = PostIncs.end(); I != E; ++I) { + for (Instruction *Inst : PostIncs) { BasicBlock *BB = DT.findNearestCommonDominator(IVIncInsertPos->getParent(), - (*I)->getParent()); - if (BB == (*I)->getParent()) - IVIncInsertPos = *I; + Inst->getParent()); + if (BB == Inst->getParent()) + IVIncInsertPos = Inst; else if (BB != IVIncInsertPos->getParent()) IVIncInsertPos = BB->getTerminator(); } } -/// reconcileNewOffset - Determine if the given use can accommodate a fixup -/// at the given offset and other details. If so, update the use and -/// return true. -bool -LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, - LSRUse::KindType Kind, Type *AccessTy) { +/// Determine if the given use can accommodate a fixup at the given offset and +/// other details. If so, update the use and return true. +bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, + bool HasBaseReg, LSRUse::KindType Kind, + MemAccessTy AccessTy) { int64_t NewMinOffset = LU.MinOffset; int64_t NewMaxOffset = LU.MaxOffset; - Type *NewAccessTy = AccessTy; + MemAccessTy NewAccessTy = AccessTy; // Check for a mismatched kind. It's tempting to collapse mismatched kinds to // something conservative, however this can pessimize in the case that one of // the uses will have all its uses outside the loop, for example. if (LU.Kind != Kind) return false; + + // Check for a mismatched access type, and fall back conservatively as needed. + // TODO: Be less conservative when the type is similar and can use the same + // addressing modes. 
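+  // For instance (an illustrative case, not from the patch): an i32 load and
+  // an i8 load folded into one LSRUse give AccessTy != LU.AccessTy, so
+  // NewAccessTy falls back to the unknown sentinel in the check below.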
+  if (Kind == LSRUse::Address) {
+    if (AccessTy != LU.AccessTy)
+      NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext());
+  }
+
   // Conservatively assume HasBaseReg is true for now.
   if (NewOffset < LU.MinOffset) {
-    if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                           LU.MaxOffset - NewOffset, HasBaseReg))
       return false;
     NewMinOffset = NewOffset;
   } else if (NewOffset > LU.MaxOffset) {
-    if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                           NewOffset - LU.MinOffset, HasBaseReg))
       return false;
     NewMaxOffset = NewOffset;
   }
-  // Check for a mismatched access type, and fall back conservatively as needed.
-  // TODO: Be less conservative when the type is similar and can use the same
-  // addressing modes.
-  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
-    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

   // Update the use.
   LU.MinOffset = NewMinOffset;
@@ -2116,24 +2269,24 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
   return true;
 }

-/// getUse - Return an LSRUse index and an offset value for a fixup which
-/// needs the given expression, with the given kind and optional access type.
-/// Either reuse an existing use or create a new one, as needed.
-std::pair<size_t, int64_t>
-LSRInstance::getUse(const SCEV *&Expr,
-                    LSRUse::KindType Kind, Type *AccessTy) {
+/// Return an LSRUse index and an offset value for a fixup which needs the given
+/// expression, with the given kind and optional access type. Either reuse an
+/// existing use or create a new one, as needed.
+std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
+                                               LSRUse::KindType Kind,
+                                               MemAccessTy AccessTy) {
   const SCEV *Copy = Expr;
   int64_t Offset = ExtractImmediate(Expr, SE);

   // Basic uses can't accept any offset, for example.
-  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
                         Offset, /*HasBaseReg=*/ true)) {
     Expr = Copy;
     Offset = 0;
   }

   std::pair<UseMapTy::iterator, bool> P =
-    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
+    UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
   if (!P.second) {
     // A use already existed with this base.
     size_t LUIdx = P.first->second;
@@ -2159,18 +2312,18 @@ LSRInstance::getUse(const SCEV *&Expr,
   return std::make_pair(LUIdx, Offset);
 }

-/// DeleteUse - Delete the given use from the Uses list.
+/// Delete the given use from the Uses list.
 void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
   if (&LU != &Uses.back())
     std::swap(LU, Uses.back());
   Uses.pop_back();

   // Update RegUses.
-  RegUses.SwapAndDropUse(LUIdx, Uses.size());
+  RegUses.swapAndDropUse(LUIdx, Uses.size());
 }

-/// FindUseWithFormula - Look for a use distinct from OrigLU which is has
-/// a formula that has the same registers as the given formula.
+/// Look for a use distinct from OrigLU which has a formula with the same
+/// registers as the given formula.
 LSRUse *
 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                        const LSRUse &OrigLU) {
@@ -2188,9 +2341,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
         LU.WidestFixupType == OrigLU.WidestFixupType &&
         LU.HasFormulaWithSameRegs(OrigF)) {
       // Scan through this use's formulae.
-      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
-           E = LU.Formulae.end(); I != E; ++I) {
-        const Formula &F = *I;
+      for (const Formula &F : LU.Formulae) {
         // Check to see if this formula has the same registers and symbols
         // as OrigF.
         if (F.BaseRegs == OrigF.BaseRegs &&
@@ -2210,7 +2361,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
   }

   // Nothing looked good.
-  return 0;
+  return nullptr;
 }

 void LSRInstance::CollectInterestingTypesAndFactors() {
@@ -2218,8 +2369,8 @@ void LSRInstance::CollectInterestingTypesAndFactors() {

   // Collect interesting types and strides.
   SmallVector<const SCEV *, 4> Worklist;
-  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
-    const SCEV *Expr = IU.getExpr(*UI);
+  for (const IVStrideUse &U : IU) {
+    const SCEV *Expr = IU.getExpr(U);

     // Collect interesting types.
     Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
@@ -2242,7 +2393,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
   for (SmallSetVector<const SCEV *, 4>::const_iterator
        I = Strides.begin(), E = Strides.end(); I != E; ++I)
     for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
-         llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
+         std::next(I); NewStrideIter != E; ++NewStrideIter) {
       const SCEV *OldStride = *I;
       const SCEV *NewStride = *NewStrideIter;
@@ -2276,9 +2427,9 @@ void LSRInstance::CollectInterestingTypesAndFactors() {

   DEBUG(print_factors_and_types(dbgs()));
 }

-/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
-/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
-/// Instructions to IVStrideUses, we could partially skip this.
+/// Helper for CollectChains that finds an IV operand (computed by an AddRec in
+/// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
+/// IVStrideUses, we could partially skip this.
 static User::op_iterator
 findIVOperand(User::op_iterator OI, User::op_iterator OE,
               Loop *L, ScalarEvolution &SE) {
@@ -2297,29 +2448,28 @@ findIVOperand(User::op_iterator OI, User::op_iterator OE,
   return OI;
 }

-/// getWideOperand - IVChain logic must consistenctly peek base TruncInst
-/// operands, so wrap it in a convenient helper.
+/// IVChain logic must consistently peek base TruncInst operands, so wrap it in
+/// a convenient helper.
 static Value *getWideOperand(Value *Oper) {
   if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
     return Trunc->getOperand(0);
   return Oper;
 }

-/// isCompatibleIVType - Return true if we allow an IV chain to include both
-/// types.
+/// Return true if we allow an IV chain to include both types.
 static bool isCompatibleIVType(Value *LVal, Value *RVal) {
   Type *LType = LVal->getType();
   Type *RType = RVal->getType();
   return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
 }

-/// getExprBase - Return an approximation of this SCEV expression's "base", or
-/// NULL for any constant. Returning the expression itself is
-/// conservative. Returning a deeper subexpression is more precise and valid as
-/// long as it isn't less complex than another subexpression. For expressions
-/// involving multiple unscaled values, we need to return the pointer-type
-/// SCEVUnknown. This avoids forming chains across objects, such as:
-/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
+/// Return an approximation of this SCEV expression's "base", or NULL for any
+/// constant. Returning the expression itself is conservative. Returning a
+/// deeper subexpression is more precise and valid as long as it isn't less
+/// complex than another subexpression. For expressions involving multiple
+/// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
+/// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
+/// IVInc==b-a.
 ///
 /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
 /// SCEVUnknown, we simply return the rightmost SCEV operand.
@@ -2328,7 +2478,7 @@ static const SCEV *getExprBase(const SCEV *S) {
   default: // including scUnknown.
     return S;
   case scConstant:
-    return 0;
+    return nullptr;
   case scTruncate:
     return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
   case scZeroExtend:
@@ -2391,7 +2541,7 @@ bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
 ///
 /// TODO: Consider IVInc free if it's already used in another chains.
 static bool
-isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
+isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
                   ScalarEvolution &SE, const TargetTransformInfo &TTI) {
   if (StressIVChain)
     return true;
@@ -2401,9 +2551,8 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
   if (!Users.empty()) {
     DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
-          for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
-               E = Users.end(); I != E; ++I) {
-            dbgs() << "  " << **I << "\n";
+          for (Instruction *Inst : Users) {
+            dbgs() << "  " << *Inst << "\n";
           });
     return false;
   }
@@ -2419,29 +2568,27 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
       && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
     --cost;
   }
-  const SCEV *LastIncExpr = 0;
+  const SCEV *LastIncExpr = nullptr;
   unsigned NumConstIncrements = 0;
   unsigned NumVarIncrements = 0;
   unsigned NumReusedIncrements = 0;
-  for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
-       I != E; ++I) {
-
-    if (I->IncExpr->isZero())
+  for (const IVInc &Inc : Chain) {
+    if (Inc.IncExpr->isZero())
       continue;

    // Incrementing by zero or some constant is neutral. We assume constants can
    // be folded into an addressing mode or an add's immediate operand.
-    if (isa<SCEVConstant>(I->IncExpr)) {
+    if (isa<SCEVConstant>(Inc.IncExpr)) {
       ++NumConstIncrements;
       continue;
     }

-    if (I->IncExpr == LastIncExpr)
+    if (Inc.IncExpr == LastIncExpr)
       ++NumReusedIncrements;
     else
       ++NumVarIncrements;

-    LastIncExpr = I->IncExpr;
+    LastIncExpr = Inc.IncExpr;
   }
  // An IV chain with a single increment is handled by LSR's postinc
  // uses. However, a chain with multiple increments requires keeping the IV's
@@ -2465,8 +2612,7 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
   return cost < 0;
 }

-/// ChainInstruction - Add this IV user to an existing chain or make it the head
-/// of a new chain.
+/// Add this IV user to an existing chain or make it the head of a new chain.
 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                                    SmallVectorImpl<ChainUsers> &ChainUsersVec) {
   // When IVs are used as types of varying widths, they are generally converted
@@ -2478,7 +2624,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
   // Visit all existing chains. Check if its IVOper can be computed as a
   // profitable loop invariant increment from the last link in the Chain.
   unsigned ChainIdx = 0, NChains = IVChainVec.size();
-  const SCEV *LastIncExpr = 0;
+  const SCEV *LastIncExpr = nullptr;
   for (; ChainIdx < NChains; ++ChainIdx) {
     IVChain &Chain = IVChainVec[ChainIdx];
@@ -2550,9 +2696,8 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
   // they will eventually be used by the current chain, or can be computed
   // from one of the chain increments.
To be more precise we could // transitively follow its user and only add leaf IV users to the set. - for (Value::use_iterator UseIter = IVOper->use_begin(), - UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) { - Instruction *OtherUse = dyn_cast(*UseIter); + for (User *U : IVOper->users()) { + Instruction *OtherUse = dyn_cast(U); if (!OtherUse) continue; // Uses in the chain will no longer be uses if the chain is formed. @@ -2579,7 +2724,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); } -/// CollectChains - Populate the vector of Chains. +/// Populate the vector of Chains. /// /// This decreases ILP at the architecture level. Targets with ample registers, /// multiple memory ports, and no register renaming probably don't want @@ -2620,19 +2765,19 @@ void LSRInstance::CollectChains() { for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); I != E; ++I) { // Skip instructions that weren't seen by IVUsers analysis. - if (isa(I) || !IU.isIVUserOrOperand(I)) + if (isa(I) || !IU.isIVUserOrOperand(&*I)) continue; // Ignore users that are part of a SCEV expression. This way we only // consider leaf IV Users. This effectively rediscovers a portion of // IVUsers analysis but in program order this time. - if (SE.isSCEVable(I->getType()) && !isa(SE.getSCEV(I))) + if (SE.isSCEVable(I->getType()) && !isa(SE.getSCEV(&*I))) continue; // Remove this instruction from any NearUsers set it may be in. for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); ChainIdx < NChains; ++ChainIdx) { - ChainUsersVec[ChainIdx].NearUsers.erase(I); + ChainUsersVec[ChainIdx].NearUsers.erase(&*I); } // Search for operands that can be chained. SmallPtrSet UniqueOperands; @@ -2640,9 +2785,9 @@ void LSRInstance::CollectChains() { User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); while (IVOpIter != IVOpEnd) { Instruction *IVOpInst = cast(*IVOpIter); - if (UniqueOperands.insert(IVOpInst)) - ChainInstruction(I, IVOpInst, ChainUsersVec); - IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); + if (UniqueOperands.insert(IVOpInst).second) + ChainInstruction(&*I, IVOpInst, ChainUsersVec); + IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); } } // Continue walking down the instructions. } // Continue walking down the domtree. 
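// An illustrative chain CollectChains can discover (a sketch, not taken from
// the source): for a header phi p with constant-stride address increments,
//   p1 = gep p,  4    ; IVInc{UserInst: load p1, IncExpr: 4}
//   p2 = gep p1, 4    ; IVInc{UserInst: load p2, IncExpr: 4}
// the loads form one IVChain, and GenerateIVChain can later materialize each
// operand as a single add from the previous link's operand.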
@@ -2677,12 +2822,11 @@ void LSRInstance::FinalizeChain(IVChain &Chain) {
   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
   DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");

-  for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
-       I != E; ++I) {
-    DEBUG(dbgs() << "        Inc: " << *I->UserInst << "\n");
-    User::op_iterator UseI =
-      std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
-    assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
+  for (const IVInc &Inc : Chain) {
+    DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
+    auto UseI = std::find(Inc.UserInst->op_begin(), Inc.UserInst->op_end(),
+                          Inc.IVOperand);
+    assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
     IVIncSet.insert(UseI);
   }
 }

@@ -2697,17 +2841,17 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
   if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
     return false;

+  MemAccessTy AccessTy = getAccessType(UserInst);
   int64_t IncOffset = IncConst->getValue()->getSExtValue();
-  if (!isAlwaysFoldable(TTI, LSRUse::Address,
-                        getAccessType(UserInst), /*BaseGV=*/ 0,
-                        IncOffset, /*HaseBaseReg=*/ false))
+  if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
+                        IncOffset, /*HasBaseReg=*/false))
     return false;

   return true;
 }

-/// GenerateIVChains - Generate an add or subtract for each IVInc in a chain to
-/// materialize the IV user's operand from the previous IV user's operand.
+/// Generate an add or subtract for each IVInc in a chain to materialize the IV
+/// user's operand from the previous IV user's operand.
 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                                   SmallVectorImpl<WeakVH> &DeadInsts) {
   // Find the new IVOperand for the head of the chain. It may have been replaced
@@ -2717,7 +2861,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
   // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
   User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
                                              IVOpEnd, L, SE);
-  Value *IVSrc = 0;
+  Value *IVSrc = nullptr;
   while (IVOpIter != IVOpEnd) {
     IVSrc = getWideOperand(*IVOpIter);

@@ -2733,7 +2877,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
         || SE.getSCEV(IVSrc) == Head.IncExpr) {
       break;
     }
-    IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
+    IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
   }
   if (IVOpIter == IVOpEnd) {
     // Gracefully give up on this chain.
@@ -2744,21 +2888,19 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
   DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
   Type *IVTy = IVSrc->getType();
   Type *IntTy = SE.getEffectiveSCEVType(IVTy);
-  const SCEV *LeftOverExpr = 0;
-  for (IVChain::const_iterator IncI = Chain.begin(),
-       IncE = Chain.end(); IncI != IncE; ++IncI) {
-
-    Instruction *InsertPt = IncI->UserInst;
+  const SCEV *LeftOverExpr = nullptr;
+  for (const IVInc &Inc : Chain) {
+    Instruction *InsertPt = Inc.UserInst;
     if (isa<PHINode>(InsertPt))
       InsertPt = L->getLoopLatch()->getTerminator();

    // IVOper will replace the current IV User's operand. IVSrc is the IV
    // value currently held in a register.
    Value *IVOper = IVSrc;
-    if (!IncI->IncExpr->isZero()) {
+    if (!Inc.IncExpr->isZero()) {
      // IncExpr was the result of subtraction of two narrow values, so must
      // be signed.
- const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy); + const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy); LeftOverExpr = LeftOverExpr ? SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; } @@ -2771,22 +2913,21 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); // If an IV increment can't be folded, use it as the next IV value. - if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand, - TTI)) { + if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) { assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); IVSrc = IVOper; - LeftOverExpr = 0; + LeftOverExpr = nullptr; } } - Type *OperTy = IncI->IVOperand->getType(); + Type *OperTy = Inc.IVOperand->getType(); if (IVTy != OperTy) { assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && "cannot extend a chained IV"); IRBuilder<> Builder(InsertPt); IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); } - IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper); - DeadInsts.push_back(IncI->IVOperand); + Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper); + DeadInsts.emplace_back(Inc.IVOperand); } // If LSR created a new, wider phi, we may also replace its postinc. We only // do this if we also found a wide value for the head of the chain. @@ -2808,17 +2949,17 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); } Phi->replaceUsesOfWith(PostIncV, IVOper); - DeadInsts.push_back(PostIncV); + DeadInsts.emplace_back(PostIncV); } } } void LSRInstance::CollectFixupsAndInitialFormulae() { - for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { - Instruction *UserInst = UI->getUser(); + for (const IVStrideUse &U : IU) { + Instruction *UserInst = U.getUser(); // Skip IV users that are part of profitable IV Chains. User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), - UI->getOperandValToReplace()); + U.getOperandValToReplace()); assert(UseI != UserInst->op_end() && "cannot find IV operand"); if (IVIncSet.count(UseI)) continue; @@ -2826,17 +2967,17 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { // Record the uses. LSRFixup &LF = getNewFixup(); LF.UserInst = UserInst; - LF.OperandValToReplace = UI->getOperandValToReplace(); - LF.PostIncLoops = UI->getPostIncLoops(); + LF.OperandValToReplace = U.getOperandValToReplace(); + LF.PostIncLoops = U.getPostIncLoops(); LSRUse::KindType Kind = LSRUse::Basic; - Type *AccessTy = 0; + MemAccessTy AccessTy; if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { Kind = LSRUse::Address; AccessTy = getAccessType(LF.UserInst); } - const SCEV *S = IU.getExpr(*UI); + const SCEV *S = IU.getExpr(U); // Equality (== and !=) ICmps are special. We can rewrite (i == N) as // (N - i == 0), and this allows (N - i) to be the expression that we work @@ -2858,10 +2999,10 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { // x == y --> x - y == 0 const SCEV *N = SE.getSCEV(NV); - if (SE.isLoopInvariant(N, L) && isSafeToExpand(N)) { + if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) { // S is normalized, so normalize N before folding it into S // to keep the result normalized. 
- N = TransformForPostIncUse(Normalize, N, CI, 0, + N = TransformForPostIncUse(Normalize, N, CI, nullptr, LF.PostIncLoops, SE, DT); Kind = LSRUse::ICmpZero; S = SE.getMinusSCEV(N, S); @@ -2896,19 +3037,22 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { DEBUG(print_fixups(dbgs())); } -/// InsertInitialFormula - Insert a formula for the given expression into -/// the given use, separating out loop-variant portions from loop-invariant -/// and loop-computable portions. +/// Insert a formula for the given expression into the given use, separating out +/// loop-variant portions from loop-invariant and loop-computable portions. void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { + // Mark uses whose expressions cannot be expanded. + if (!isSafeToExpand(S, SE)) + LU.RigidFormula = true; + Formula F; - F.InitialMatch(S, L, SE); + F.initialMatch(S, L, SE); bool Inserted = InsertFormula(LU, LUIdx, F); assert(Inserted && "Initial formula already exists!"); (void)Inserted; } -/// InsertSupplementalFormula - Insert a simple single-register formula for -/// the given expression into the given use. +/// Insert a simple single-register formula for the given expression into the +/// given use. void LSRInstance::InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { @@ -2919,19 +3063,20 @@ LSRInstance::InsertSupplementalFormula(const SCEV *S, assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; } -/// CountRegisters - Note which registers are used by the given formula, -/// updating RegUses. +/// Note which registers are used by the given formula, updating RegUses. void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { if (F.ScaledReg) - RegUses.CountRegister(F.ScaledReg, LUIdx); - for (SmallVectorImpl::const_iterator I = F.BaseRegs.begin(), - E = F.BaseRegs.end(); I != E; ++I) - RegUses.CountRegister(*I, LUIdx); + RegUses.countRegister(F.ScaledReg, LUIdx); + for (const SCEV *BaseReg : F.BaseRegs) + RegUses.countRegister(BaseReg, LUIdx); } -/// InsertFormula - If the given formula has not yet been inserted, add it to -/// the list, and return true. Return false otherwise. +/// If the given formula has not yet been inserted, add it to the list, and +/// return true. Return false otherwise. bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { + // Do not insert formula that we will not be able to expand. + assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && + "Formula is illegal"); if (!LU.InsertFormula(F)) return false; @@ -2939,19 +3084,23 @@ bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { return true; } -/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of -/// loop-invariant values which we're tracking. These other uses will pin these -/// values in registers, making them less profitable for elimination. +/// Check for other uses of loop-invariant values which we're tracking. These +/// other uses will pin these values in registers, making them less profitable +/// for elimination. /// TODO: This currently misses non-constant addrec step registers. /// TODO: Should this give more weight to users inside the loop? 
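+///
+/// For example (a sketch, not from the source): if a loop-invariant %n is
+/// compared against the IV inside the loop and also stored to memory after
+/// the loop, the store pins %n in a register anyway, so formulae that reuse
+/// %n as a base register are cheaper than the raw register count suggests.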
void LSRInstance::CollectLoopInvariantFixupsAndFormulae() { SmallVector Worklist(RegUses.begin(), RegUses.end()); - SmallPtrSet Inserted; + SmallPtrSet Visited; while (!Worklist.empty()) { const SCEV *S = Worklist.pop_back_val(); + // Don't process the same SCEV twice + if (!Visited.insert(S).second) + continue; + if (const SCEVNAryExpr *N = dyn_cast(S)) Worklist.append(N->op_begin(), N->op_end()); else if (const SCEVCastExpr *C = dyn_cast(S)) @@ -2959,18 +3108,16 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { else if (const SCEVUDivExpr *D = dyn_cast(S)) { Worklist.push_back(D->getLHS()); Worklist.push_back(D->getRHS()); - } else if (const SCEVUnknown *U = dyn_cast(S)) { - if (!Inserted.insert(U)) continue; - const Value *V = U->getValue(); + } else if (const SCEVUnknown *US = dyn_cast(S)) { + const Value *V = US->getValue(); if (const Instruction *Inst = dyn_cast(V)) { // Look for instructions defined outside the loop. if (L->contains(Inst)) continue; } else if (isa(V)) // Undef doesn't have a live range, so it doesn't matter. continue; - for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); - UI != UE; ++UI) { - const Instruction *UserInst = dyn_cast(*UI); + for (const Use &U : V->uses()) { + const Instruction *UserInst = dyn_cast(U.getUser()); // Ignore non-instructions. if (!UserInst) continue; @@ -2982,9 +3129,12 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { const BasicBlock *UseBB = !isa(UserInst) ? UserInst->getParent() : cast(UserInst)->getIncomingBlock( - PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); + PHINode::getIncomingValueNumForOperand(U.getOperandNo())); if (!DT.dominates(L->getHeader(), UseBB)) continue; + // Don't bother if the instruction is in a BB which ends in an EHPad. + if (UseBB->getTerminator()->isEHPad()) + continue; // Ignore uses which are part of other SCEV expressions, to avoid // analyzing them multiple times. if (SE.isSCEVable(UserInst->getType())) { @@ -2992,7 +3142,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { // If the user is a no-op, look through to its uses. if (!isa(UserS)) continue; - if (UserS == U) { + if (UserS == US) { Worklist.push_back( SE.getUnknown(const_cast(UserInst))); continue; @@ -3000,7 +3150,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { } // Ignore icmp instructions which are already being analyzed. 
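+        // (Illustrative: an icmp has exactly two operands, so for a use at
+        // operand 0 the "other" operand index below is !0 == 1, and vice
+        // versa.)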
if (const ICmpInst *ICI = dyn_cast(UserInst)) { - unsigned OtherIdx = !UI.getOperandNo(); + unsigned OtherIdx = !U.getOperandNo(); Value *OtherOp = const_cast(ICI->getOperand(OtherIdx)); if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) continue; @@ -3008,8 +3158,9 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { LSRFixup &LF = getNewFixup(); LF.UserInst = const_cast(UserInst); - LF.OperandValToReplace = UI.getUse(); - std::pair P = getUse(S, LSRUse::Basic, 0); + LF.OperandValToReplace = U; + std::pair P = getUse( + S, LSRUse::Basic, MemAccessTy()); LF.LUIdx = P.first; LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; @@ -3018,7 +3169,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { SE.getTypeSizeInBits(LU.WidestFixupType) < SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) LU.WidestFixupType = LF.OperandValToReplace->getType(); - InsertSupplementalFormula(U, LU, LF.LUIdx); + InsertSupplementalFormula(US, LU, LF.LUIdx); CountRegisters(LU.Formulae.back(), Uses.size() - 1); break; } @@ -3026,8 +3177,8 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { } } -/// CollectSubexprs - Split S into subexpressions which can be pulled out into -/// separate registers. If C is non-null, multiply each subexpression by C. +/// Split S into subexpressions which can be pulled out into separate +/// registers. If C is non-null, multiply each subexpression by C. /// /// Return remainder expression after factoring the subexpressions captured by /// Ops. If Ops is complete, return NULL. @@ -3042,13 +3193,12 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, if (const SCEVAddExpr *Add = dyn_cast(S)) { // Break out add operands. - for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); - I != E; ++I) { - const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1); + for (const SCEV *S : Add->operands()) { + const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1); if (Remainder) Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); } - return NULL; + return nullptr; } else if (const SCEVAddRecExpr *AR = dyn_cast(S)) { // Split a non-zero base out of an addrec. if (AR->getStart()->isZero()) @@ -3060,7 +3210,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, // does not pertain to this loop. if (Remainder && (AR->getLoop() == L || !isa(Remainder))) { Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); - Remainder = NULL; + Remainder = nullptr; } if (Remainder != AR->getStart()) { if (!Remainder) @@ -3082,105 +3232,126 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); if (Remainder) Ops.push_back(SE.getMulExpr(C, Remainder)); - return NULL; + return nullptr; } } return S; } -/// GenerateReassociations - Split out subexpressions from adds and the bases of -/// addrecs. -void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, - Formula Base, - unsigned Depth) { - // Arbitrarily cap recursion to protect compile time. - if (Depth >= 3) return; - - for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { - const SCEV *BaseReg = Base.BaseRegs[i]; +/// \brief Helper function for LSRInstance::GenerateReassociations. +void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, + const Formula &Base, + unsigned Depth, size_t Idx, + bool IsScaledReg) { + const SCEV *BaseReg = IsScaledReg ? 
Base.ScaledReg : Base.BaseRegs[Idx];
+  SmallVector<const SCEV *, 8> AddOps;
+  const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
+  if (Remainder)
+    AddOps.push_back(Remainder);
+
+  if (AddOps.size() == 1)
+    return;

-    SmallVector<const SCEV *, 8> AddOps;
-    const SCEV *Remainder = CollectSubexprs(BaseReg, 0, AddOps, L, SE);
-    if (Remainder)
-      AddOps.push_back(Remainder);
+  for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
+                                                     JE = AddOps.end();
+       J != JE; ++J) {

-    if (AddOps.size() == 1) continue;
+    // Loop-variant "unknown" values are uninteresting; we won't be able to
+    // do anything meaningful with them.
+    if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
+      continue;

-    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
-         JE = AddOps.end(); J != JE; ++J) {
+    // Don't pull a constant into a register if the constant could be folded
+    // into an immediate field.
+    if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
+                         LU.AccessTy, *J, Base.getNumRegs() > 1))
+      continue;

-      // Loop-variant "unknown" values are uninteresting; we won't be able to
-      // do anything meaningful with them.
-      if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
-        continue;
+    // Collect all operands except *J.
+    SmallVector<const SCEV *, 8> InnerAddOps(
+        ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
+    InnerAddOps.append(std::next(J),
+                       ((const SmallVector<const SCEV *, 8> &)AddOps).end());
+
+    // Don't leave just a constant behind in a register if the constant could
+    // be folded into an immediate field.
+    if (InnerAddOps.size() == 1 &&
+        isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
+                         LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
+      continue;

-      // Don't pull a constant into a register if the constant could be folded
-      // into an immediate field.
-      if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
-                           LU.AccessTy, *J, Base.getNumRegs() > 1))
-        continue;
+    const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
+    if (InnerSum->isZero())
+      continue;
+    Formula F = Base;

-      // Collect all operands except *J.
-      SmallVector<const SCEV *, 8> InnerAddOps
-        (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
-      InnerAddOps.append
-        (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
-
-      // Don't leave just a constant behind in a register if the constant could
-      // be folded into an immediate field.
-      if (InnerAddOps.size() == 1 &&
-          isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
-                           LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
-        continue;
+    // Add the remaining pieces of the add back into the new formula.
+    const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
+    if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
+        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+                                InnerSumSC->getValue()->getZExtValue())) {
+      F.UnfoldedOffset =
+          (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
+      if (IsScaledReg)
+        F.ScaledReg = nullptr;
+      else
+        F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
+    } else if (IsScaledReg)
+      F.ScaledReg = InnerSum;
+    else
+      F.BaseRegs[Idx] = InnerSum;
+
+    // Add J as its own register, or an unfolded immediate.
+    const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
+    if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
+        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+                                SC->getValue()->getZExtValue()))
+      F.UnfoldedOffset =
+          (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
+    else
+      F.BaseRegs.push_back(*J);
+    // We may have changed the number of registers in base regs; adjust the
+    // formula accordingly.
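+    // (Sketch of the invariant at work: erasing the last base register can
+    // leave {ScaledReg = R, Scale = 1, BaseRegs = {}}, which canonicalize()
+    // rewrites to the plain single-register form {BaseRegs = {R}}.)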
+ F.canonicalize(); + + if (InsertFormula(LU, LUIdx, F)) + // If that formula hadn't been seen before, recurse to find more like + // it. + GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth + 1); + } +} - const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); - if (InnerSum->isZero()) - continue; - Formula F = Base; +/// Split out subexpressions from adds and the bases of addrecs. +void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, + Formula Base, unsigned Depth) { + assert(Base.isCanonical() && "Input must be in the canonical form"); + // Arbitrarily cap recursion to protect compile time. + if (Depth >= 3) + return; - // Add the remaining pieces of the add back into the new formula. - const SCEVConstant *InnerSumSC = dyn_cast(InnerSum); - if (InnerSumSC && - SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && - TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + - InnerSumSC->getValue()->getZExtValue())) { - F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + - InnerSumSC->getValue()->getZExtValue(); - F.BaseRegs.erase(F.BaseRegs.begin() + i); - } else - F.BaseRegs[i] = InnerSum; - - // Add J as its own register, or an unfolded immediate. - const SCEVConstant *SC = dyn_cast(*J); - if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && - TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + - SC->getValue()->getZExtValue())) - F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + - SC->getValue()->getZExtValue(); - else - F.BaseRegs.push_back(*J); + for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) + GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i); - if (InsertFormula(LU, LUIdx, F)) - // If that formula hadn't been seen before, recurse to find more like - // it. - GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1); - } - } + if (Base.Scale == 1) + GenerateReassociationsImpl(LU, LUIdx, Base, Depth, + /* Idx */ -1, /* IsScaledReg */ true); } -/// GenerateCombinations - Generate a formula consisting of all of the -/// loop-dominating registers added into a single register. +/// Generate a formula consisting of all of the loop-dominating registers added +/// into a single register. void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base) { // This method is only interesting on a plurality of registers. - if (Base.BaseRegs.size() <= 1) return; + if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1) + return; + // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before + // processing the formula. + Base.unscale(); Formula F = Base; F.BaseRegs.clear(); SmallVector Ops; - for (SmallVectorImpl::const_iterator - I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) { - const SCEV *BaseReg = *I; + for (const SCEV *BaseReg : Base.BaseRegs) { if (SE.properlyDominates(BaseReg, L->getHeader()) && !SE.hasComputableLoopEvolution(BaseReg, L)) Ops.push_back(BaseReg); @@ -3194,29 +3365,85 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, // rather than proceed with zero in a register. if (!Sum->isZero()) { F.BaseRegs.push_back(Sum); + F.canonicalize(); (void)InsertFormula(LU, LUIdx, F); } } } -/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets. +/// \brief Helper function for LSRInstance::GenerateSymbolicOffsets. +void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, + const Formula &Base, size_t Idx, + bool IsScaledReg) { + const SCEV *G = IsScaledReg ? 
Base.ScaledReg : Base.BaseRegs[Idx]; + GlobalValue *GV = ExtractSymbol(G, SE); + if (G->isZero() || !GV) + return; + Formula F = Base; + F.BaseGV = GV; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) + return; + if (IsScaledReg) + F.ScaledReg = G; + else + F.BaseRegs[Idx] = G; + (void)InsertFormula(LU, LUIdx, F); +} + +/// Generate reuse formulae using symbolic offsets. void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base) { // We can't add a symbolic offset if the address already contains one. if (Base.BaseGV) return; - for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { - const SCEV *G = Base.BaseRegs[i]; - GlobalValue *GV = ExtractSymbol(G, SE); - if (G->isZero() || !GV) - continue; + for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) + GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i); + if (Base.Scale == 1) + GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1, + /* IsScaledReg */ true); +} + +/// \brief Helper function for LSRInstance::GenerateConstantOffsets. +void LSRInstance::GenerateConstantOffsetsImpl( + LSRUse &LU, unsigned LUIdx, const Formula &Base, + const SmallVectorImpl &Worklist, size_t Idx, bool IsScaledReg) { + const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; + for (int64_t Offset : Worklist) { Formula F = Base; - F.BaseGV = GV; - if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) - continue; - F.BaseRegs[i] = G; - (void)InsertFormula(LU, LUIdx, F); + F.BaseOffset = (uint64_t)Base.BaseOffset - Offset; + if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind, + LU.AccessTy, F)) { + // Add the offset to the base register. + const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G); + // If it cancelled out, drop the base register, otherwise update it. + if (NewG->isZero()) { + if (IsScaledReg) { + F.Scale = 0; + F.ScaledReg = nullptr; + } else + F.deleteBaseReg(F.BaseRegs[Idx]); + F.canonicalize(); + } else if (IsScaledReg) + F.ScaledReg = NewG; + else + F.BaseRegs[Idx] = NewG; + + (void)InsertFormula(LU, LUIdx, F); + } } + + int64_t Imm = ExtractImmediate(G, SE); + if (G->isZero() || Imm == 0) + return; + Formula F = Base; + F.BaseOffset = (uint64_t)F.BaseOffset + Imm; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) + return; + if (IsScaledReg) + F.ScaledReg = G; + else + F.BaseRegs[Idx] = G; + (void)InsertFormula(LU, LUIdx, F); } /// GenerateConstantOffsets - Generate reuse formulae using symbolic offsets. @@ -3229,42 +3456,15 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, if (LU.MaxOffset != LU.MinOffset) Worklist.push_back(LU.MaxOffset); - for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { - const SCEV *G = Base.BaseRegs[i]; - - for (SmallVectorImpl::const_iterator I = Worklist.begin(), - E = Worklist.end(); I != E; ++I) { - Formula F = Base; - F.BaseOffset = (uint64_t)Base.BaseOffset - *I; - if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind, - LU.AccessTy, F)) { - // Add the offset to the base register. - const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G); - // If it cancelled out, drop the base register, otherwise update it. 
- if (NewG->isZero()) { - std::swap(F.BaseRegs[i], F.BaseRegs.back()); - F.BaseRegs.pop_back(); - } else - F.BaseRegs[i] = NewG; - - (void)InsertFormula(LU, LUIdx, F); - } - } - - int64_t Imm = ExtractImmediate(G, SE); - if (G->isZero() || Imm == 0) - continue; - Formula F = Base; - F.BaseOffset = (uint64_t)F.BaseOffset + Imm; - if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) - continue; - F.BaseRegs[i] = G; - (void)InsertFormula(LU, LUIdx, F); - } + for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) + GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i); + if (Base.Scale == 1) + GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1, + /* IsScaledReg */ true); } -/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up -/// the comparison. For example, x == y -> x*c == y*c. +/// For ICmpZero, check to see if we can scale up the comparison. For example, x +/// == y -> x*c == y*c. void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base) { if (LU.Kind != LSRUse::ICmpZero) return; @@ -3280,16 +3480,17 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, assert(!Base.BaseGV && "ICmpZero use is not legal!"); // Check each interesting stride. - for (SmallSetVector::const_iterator - I = Factors.begin(), E = Factors.end(); I != E; ++I) { - int64_t Factor = *I; - + for (int64_t Factor : Factors) { // Check that the multiplication doesn't overflow. if (Base.BaseOffset == INT64_MIN && Factor == -1) continue; int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor; if (NewBaseOffset / Factor != Base.BaseOffset) continue; + // If the offset will be truncated at this use, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, NewBaseOffset)) + continue; // Check that multiplying with the use offset doesn't overflow. int64_t Offset = LU.MinOffset; @@ -3298,6 +3499,10 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Offset = (uint64_t)Offset * Factor; if (Offset / Factor != LU.MinOffset) continue; + // If the offset will be truncated at this use, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, Offset)) + continue; Formula F = Base; F.BaseOffset = NewBaseOffset; @@ -3332,6 +3537,10 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) continue; + // If the offset will be truncated, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset)) + continue; } // If we make it here and it's legal, add it. @@ -3340,21 +3549,22 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, } } -/// GenerateScales - Generate stride factor reuse formulae by making use of -/// scaled-offset address modes, for example. +/// Generate stride factor reuse formulae by making use of scaled-offset address +/// modes, for example. void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { // Determine the integer type for the base formula. Type *IntTy = Base.getType(); if (!IntTy) return; // If this Formula already has a scaled register, we can't add another one. - if (Base.Scale != 0) return; + // Try to unscale the formula to generate a better scale. + if (Base.Scale != 0 && !Base.unscale()) + return; - // Check each interesting stride. 
-  for (SmallSetVector<int64_t, 8>::const_iterator
-       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
-    int64_t Factor = *I;
+  assert(Base.Scale == 0 && "unscale did not do its job!");

+  // Check each interesting stride.
+  for (int64_t Factor : Factors) {
     Base.Scale = Factor;
     Base.HasBaseReg = Base.BaseRegs.size() > 1;
     // Check whether this scale is going to be legal.
@@ -3388,14 +3598,19 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
         // TODO: This could be optimized to avoid all the copying.
         Formula F = Base;
         F.ScaledReg = Quotient;
-        F.DeleteBaseReg(F.BaseRegs[i]);
+        F.deleteBaseReg(F.BaseRegs[i]);
+        // The canonical representation of 1*reg is reg, which is already in
+        // Base. In that case, do not try to insert the formula, it will be
+        // rejected anyway.
+        if (F.Scale == 1 && F.BaseRegs.empty())
+          continue;
         (void)InsertFormula(LU, LUIdx, F);
       }
     }
   }
 }

-/// GenerateTruncates - Generate reuse formulae from different IV types.
+/// Generate reuse formulae from different IV types.
 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
   // Don't bother truncating symbolic values.
   if (Base.BaseGV) return;
@@ -3405,16 +3620,13 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
   if (!DstTy) return;
   DstTy = SE.getEffectiveSCEVType(DstTy);

-  for (SmallSetVector<Type *, 4>::const_iterator
-       I = Types.begin(), E = Types.end(); I != E; ++I) {
-    Type *SrcTy = *I;
+  for (Type *SrcTy : Types) {
     if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
       Formula F = Base;

-      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
-      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
-           JE = F.BaseRegs.end(); J != JE; ++J)
-        *J = SE.getAnyExtendExpr(*J, SrcTy);
+      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
+      for (const SCEV *&BaseReg : F.BaseRegs)
+        BaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);

       // TODO: This assumes we've done basic processing on all uses and
       // have an idea what the register usage is.
@@ -3428,9 +3640,9 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {

 namespace {

-/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
-/// defer modifications so that the search phase doesn't have to worry about
-/// the data structures moving underneath it.
+/// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
+/// modifications so that the search phase doesn't have to worry about the data
+/// structures moving underneath it.
 struct WorkItem {
   size_t LUIdx;
   int64_t Imm;
@@ -3456,25 +3668,22 @@ void WorkItem::dump() const {
 }
 #endif

-/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
-/// distance apart and try to form reuse opportunities between them.
+/// Look for registers which are a constant distance apart and try to form reuse
+/// opportunities between them.
 void LSRInstance::GenerateCrossUseConstantOffsets() {
   // Group the registers by their value without any added constant offset.
   typedef std::map<int64_t, const SCEV *> ImmMapTy;
-  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
-  RegMapTy Map;
+  DenseMap<const SCEV *, ImmMapTy> Map;
   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
   SmallVector<const SCEV *, 8> Sequence;
-  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
-       I != E; ++I) {
-    const SCEV *Reg = *I;
+  for (const SCEV *Use : RegUses) {
+    const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
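+    // (Illustrative: for a register such as {A,+,4} plus 12, ExtractImmediate
+    // strips the constant, leaving Reg == {A,+,4} and returning Imm == 12, so
+    // registers that differ only by a constant share one ImmMapTy bucket.)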
    int64_t Imm = ExtractImmediate(Reg, SE);
-    std::pair<RegMapTy::iterator, bool> Pair =
-      Map.insert(std::make_pair(Reg, ImmMapTy()));
+    auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
     if (Pair.second)
       Sequence.push_back(Reg);
-    Pair.first->second.insert(std::make_pair(Imm, *I));
-    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
+    Pair.first->second.insert(std::make_pair(Imm, Use));
+    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
   }

   // Now examine each set of registers with the same base value. Build up
   // a list of work to do and do the work in a separate pass so that we're
   // not adding formulae and register counts while we're searching.
   SmallVector<WorkItem, 32> WorkItems;
   SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
-  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
-       E = Sequence.end(); I != E; ++I) {
-    const SCEV *Reg = *I;
+  for (const SCEV *Reg : Sequence) {
     const ImmMapTy &Imms = Map.find(Reg)->second;

     // It's not worthwhile looking for reuse if there's only one offset.
@@ -3492,9 +3699,8 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
       continue;

     DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
-          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
-               J != JE; ++J)
-            dbgs() << ' ' << J->first;
+          for (const auto &Entry : Imms)
+            dbgs() << ' ' << Entry.first;
           dbgs() << '\n');

     // Examine each offset.
@@ -3514,8 +3720,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
       // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
       ImmMapTy::const_iterator OtherImms[] = {
-        Imms.begin(), prior(Imms.end()),
-        Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
+        Imms.begin(), std::prev(Imms.end()),
+        Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) /
+                         2)
       };
       for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
         ImmMapTy::const_iterator M = OtherImms[i];
@@ -3526,7 +3733,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
         for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
              LUIdx = UsedByIndices.find_next(LUIdx))
           // Make a memo of this use, offset, and register tuple.
-          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
+          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
             WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
       }
     }
@@ -3538,9 +3745,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
   UniqueItems.clear();

   // Now iterate through the worklist and add new formulae.
-  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
-       E = WorkItems.end(); I != E; ++I) {
-    const WorkItem &WI = *I;
+  for (const WorkItem &WI : WorkItems) {
     size_t LUIdx = WI.LUIdx;
     LSRUse &LU = Uses[LUIdx];
     int64_t Imm = WI.Imm;

@@ -3552,7 +3757,12 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {

     // TODO: Use a more targeted data structure.
     for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
-      const Formula &F = LU.Formulae[L];
+      Formula F = LU.Formulae[L];
+      // FIXME: The code for the scaled and unscaled registers looks
+      // very similar but slightly different. Investigate if they
+      // could be merged. That way, we would not have to unscale the
+      // Formula.
+      F.unscale();
       // Use the immediate in the scaled register.
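+      // (Sketch: with OrigReg == R, Imm == 8, and F.Scale == 2, folding the
+      // offset into the scaled register adds Imm * Scale == 16 to BaseOffset
+      // in the computation below.)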
      if (F.ScaledReg == OrigReg) {
        int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
@@ -3574,10 +3784,11 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
          if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale))
-               .ule(abs64(NewF.BaseOffset)))
+               .ule(std::abs(NewF.BaseOffset)))
            continue;

          // OK, looks good.
+          NewF.canonicalize();
          (void)InsertFormula(LU, LUIdx, NewF);
        } else {
          // Use the immediate in a base register.
@@ -3599,18 +3810,17 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
            // If the new formula has a constant in a register, and adding the
            // constant value to the immediate would produce a value closer to
            // zero than the immediate itself, then the formula isn't worthwhile.
-            for (SmallVectorImpl<const SCEV *>::const_iterator
-                 J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
-                 J != JE; ++J)
-              if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
+            for (const SCEV *NewReg : NewF.BaseRegs)
+              if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
                if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt(
-                      abs64(NewF.BaseOffset)) &&
+                      std::abs(NewF.BaseOffset)) &&
                    (C->getValue()->getValue() +
                     NewF.BaseOffset).countTrailingZeros() >=
                     countTrailingZeros(NewF.BaseOffset))
                  goto skip_formula;

            // Ok, looks good.
+            NewF.canonicalize();
            (void)InsertFormula(LU, LUIdx, NewF);
            break;
          skip_formula:;
@@ -3620,7 +3830,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
   }
 }

-/// GenerateAllReuseFormulae - Generate formulae for each use.
+/// Generate formulae for each use.
 void
 LSRInstance::GenerateAllReuseFormulae() {
   // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
@@ -3690,7 +3900,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
      // the corresponding bad register from the Regs set.
      Cost CostF;
      Regs.clear();
-      CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
+      CostF.RateFormula(TTI, F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
                        LU, &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are generated
@@ -3704,9 +3914,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
      } else {
        SmallVector<const SCEV *, 4> Key;
-        for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
-             JE = F.BaseRegs.end(); J != JE; ++J) {
-          const SCEV *Reg = *J;
+        for (const SCEV *Reg : F.BaseRegs) {
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
@@ -3726,7 +3934,8 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {

        Cost CostBest;
        Regs.clear();
-        CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
+        CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, LU.Offsets, SE,
+                             DT, LU);
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
@@ -3761,15 +3970,13 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {

 // This is a rough guess that seems to work fairly well.
 static const size_t ComplexityLimit = UINT16_MAX;

-/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
-/// solutions the solver might have to consider. It almost never considers
-/// this many solutions because it prune the search space, but the pruning
-/// isn't always sufficient.
+/// Estimate the worst-case number of solutions the solver might have to
+/// consider. It almost never considers this many solutions because it prunes
+/// the search space, but the pruning isn't always sufficient.
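+/// (For instance, a sketch: three uses with 4, 3, and 5 formulae give a worst
+/// case of 4 * 3 * 5 = 60 candidate solutions, with each factor and the final
+/// product capped at ComplexityLimit.)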
 size_t LSRInstance::EstimateSearchSpaceComplexity() const {
   size_t Power = 1;
-  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
-       E = Uses.end(); I != E; ++I) {
-    size_t FSize = I->Formulae.size();
+  for (const LSRUse &LU : Uses) {
+    size_t FSize = LU.Formulae.size();
     if (FSize >= ComplexityLimit) {
       Power = ComplexityLimit;
       break;
@@ -3781,10 +3988,9 @@ size_t LSRInstance::EstimateSearchSpaceComplexity() const {
   return Power;
 }
 
-/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
-/// of the registers of another formula, it won't help reduce register
-/// pressure (though it may not necessarily hurt register pressure); remove
-/// it to simplify the system.
+/// When one formula uses a superset of the registers of another formula, it
+/// won't help reduce register pressure (though it may not necessarily hurt
+/// register pressure); remove it to simplify the system.
 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
   if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
     DEBUG(dbgs() << "The search space is too complex.\n");
@@ -3845,9 +4051,8 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
   }
 }
 
-/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
-/// for expressions like A, A+1, A+2, etc., allocate a single register for
-/// them.
+/// When there are many registers for expressions like A, A+1, A+2, etc.,
+/// allocate a single register for them.
 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
     return;
@@ -3860,10 +4065,8 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
 
   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
     LSRUse &LU = Uses[LUIdx];
-    for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
-         E = LU.Formulae.end(); I != E; ++I) {
-      const Formula &F = *I;
-      if (F.BaseOffset == 0 || F.Scale != 0)
+    for (const Formula &F : LU.Formulae) {
+      if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
         continue;
 
       LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
@@ -3879,9 +4082,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
       LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
 
       // Update the relocs to reference the new use.
-      for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
-           E = Fixups.end(); I != E; ++I) {
-        LSRFixup &Fixup = *I;
+      for (LSRFixup &Fixup : Fixups) {
         if (Fixup.LUIdx == LUIdx) {
           Fixup.LUIdx = LUThatHas - &Uses.front();
           Fixup.Offset += F.BaseOffset;
@@ -3928,8 +4129,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
   DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
 }
 
-/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
-/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
+/// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
 /// we've done more filtering, as it may be able to find more formulae to
 /// eliminate.
 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
@@ -3946,9 +4146,9 @@ void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
   }
 }
 
-/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
-/// to be profitable, and then in any use which has any reference to that
-/// register, delete all formulae which do not reference that register.
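
// Illustrative standalone sketch (not part of the patch) of the superset
// pruning described above: within one use, a formula whose register set
// strictly contains another formula's set is dropped, since it cannot
// reduce register pressure. Registers are modeled as plain ints here.
#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

int main() {
  using RegSet = std::set<int>;
  std::vector<RegSet> Formulae = {{1, 2}, {1}, {1, 2, 3}};
  for (auto I = Formulae.begin(); I != Formulae.end();) {
    bool IsSuperset =
        std::any_of(Formulae.begin(), Formulae.end(), [&](const RegSet &O) {
          // A strict superset of some other formula's registers.
          return &O != &*I && O.size() < I->size() &&
                 std::includes(I->begin(), I->end(), O.begin(), O.end());
        });
    I = IsSuperset ? Formulae.erase(I) : std::next(I);
  }
  std::cout << Formulae.size() << " formula(e) remain\n"; // 1: just {1}
}
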
+/// Pick a register which seems likely to be profitable, and then in any use
+/// which has any reference to that register, delete all formulae which do not
+/// reference that register.
 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
   // With all other options exhausted, loop until the system is simple
   // enough to handle.
@@ -3960,11 +4160,9 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
 
     // Pick the register which is used by the most LSRUses, which is likely
     // to be a good reuse register candidate.
-    const SCEV *Best = 0;
+    const SCEV *Best = nullptr;
     unsigned BestNum = 0;
-    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
-         I != E; ++I) {
-      const SCEV *Reg = *I;
+    for (const SCEV *Reg : RegUses) {
       if (Taken.count(Reg))
         continue;
       if (!Best)
@@ -4011,10 +4209,10 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
   }
 }
 
-/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
-/// formulae to choose from, use some rough heuristics to prune down the number
-/// of formulae. This keeps the main solver from taking an extraordinary amount
-/// of time in some worst-case scenarios.
+/// If there are an extraordinary number of formulae to choose from, use some
+/// rough heuristics to prune down the number of formulae. This keeps the main
+/// solver from taking an extraordinary amount of time in some worst-case
+/// scenarios.
 void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
   NarrowSearchSpaceByDetectingSupersets();
   NarrowSearchSpaceByCollapsingUnrolledCode();
@@ -4022,7 +4220,7 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
   NarrowSearchSpaceByPickingWinnerRegs();
 }
 
-/// SolveRecurse - This is the recursive solver.
+/// This is the recursive solver.
 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                                Cost &SolutionCost,
                                SmallVectorImpl<const Formula *> &Workspace,
@@ -4046,30 +4244,27 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
   // reference that register in order to be considered. This prunes out
   // unprofitable searching.
   SmallSetVector<const SCEV *, 4> ReqRegs;
-  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
-       E = CurRegs.end(); I != E; ++I)
-    if (LU.Regs.count(*I))
-      ReqRegs.insert(*I);
+  for (const SCEV *S : CurRegs)
+    if (LU.Regs.count(S))
+      ReqRegs.insert(S);
 
   SmallPtrSet<const SCEV *, 16> NewRegs;
   Cost NewCost;
-  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
-       E = LU.Formulae.end(); I != E; ++I) {
-    const Formula &F = *I;
-
-    // Ignore formulae which do not use any of the required registers.
-    bool SatisfiedReqReg = true;
-    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
-         JE = ReqRegs.end(); J != JE; ++J) {
-      const SCEV *Reg = *J;
-      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
-          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
+  for (const Formula &F : LU.Formulae) {
+    // Ignore formulae which may not be ideal in terms of register reuse of
+    // ReqRegs. The formula should use all required registers before
+    // introducing new ones.
+    int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
+    for (const SCEV *Reg : ReqRegs) {
+      if ((F.ScaledReg && F.ScaledReg == Reg) ||
+          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) !=
           F.BaseRegs.end()) {
-        SatisfiedReqReg = false;
-        break;
+        --NumReqRegsToFind;
+        if (NumReqRegsToFind == 0)
+          break;
       }
     }
-    if (!SatisfiedReqReg) {
+    if (NumReqRegsToFind != 0) {
       // If none of the formulae satisfied the required registers, then we could
       // clear ReqRegs and try again. Currently, we simply give up in this case.
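
// Illustrative standalone sketch (not part of the patch) of the
// required-register pruning in SolveRecurse above: a formula is only
// explored if it reuses every already-chosen register that this use
// references, before introducing new ones. All data is hypothetical.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

static bool coversRequired(const std::vector<int> &FormulaRegs,
                           const std::vector<int> &ReqRegs) {
  // A formula with fewer registers than required can still qualify if it
  // uses nothing but required registers, hence the min().
  size_t NumReqRegsToFind = std::min(FormulaRegs.size(), ReqRegs.size());
  for (int Reg : ReqRegs)
    if (std::find(FormulaRegs.begin(), FormulaRegs.end(), Reg) !=
        FormulaRegs.end())
      if (--NumReqRegsToFind == 0)
        return true;
  return NumReqRegsToFind == 0; // true only when nothing was required
}

int main() {
  std::cout << coversRequired({1, 2}, {2}) << '\n'; // 1: reuses reg 2
  std::cout << coversRequired({3, 4}, {2}) << '\n'; // 0: would be pruned
}
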
       continue;
@@ -4079,7 +4274,8 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
     // the current best, prune the search at that point.
     NewCost = CurCost;
     NewRegs = CurRegs;
-    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
+    NewCost.RateFormula(TTI, F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT,
+                        LU);
     if (NewCost < SolutionCost) {
       Workspace.push_back(&F);
       if (Workspace.size() != Uses.size()) {
@@ -4090,9 +4286,8 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
       } else {
         DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
               dbgs() << ".\n Regs:";
-              for (SmallPtrSet<const SCEV *, 16>::const_iterator
-                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
-                dbgs() << ' ' << **I;
+              for (const SCEV *S : NewRegs)
+                dbgs() << ' ' << *S;
               dbgs() << '\n');
 
         SolutionCost = NewCost;
@@ -4103,12 +4298,12 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
   }
 }
 
-/// Solve - Choose one formula from each use. Return the results in the given
-/// Solution vector.
+/// Choose one formula from each use. Return the results in the given Solution
+/// vector.
 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
   SmallVector<const Formula *, 8> Workspace;
   Cost SolutionCost;
-  SolutionCost.Loose();
+  SolutionCost.Lose();
   Cost CurCost;
   SmallPtrSet<const SCEV *, 16> CurRegs;
   DenseSet<const SCEV *> VisitedRegs;
@@ -4138,10 +4333,9 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
   assert(Solution.size() == Uses.size() && "Malformed solution!");
 }
 
-/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
-/// the dominator tree far as we can go while still being dominated by the
-/// input positions. This helps canonicalize the insert position, which
-/// encourages sharing.
+/// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as
+/// far as we can go while still being dominated by the input positions. This
+/// helps canonicalize the insert position, which encourages sharing.
 BasicBlock::iterator
 LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                  const SmallVectorImpl<Instruction *> &Inputs)
@@ -4166,11 +4360,9 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
     }
 
     bool AllDominate = true;
-    Instruction *BetterPos = 0;
+    Instruction *BetterPos = nullptr;
     Instruction *Tentative = IDom->getTerminator();
-    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
-         E = Inputs.end(); I != E; ++I) {
-      Instruction *Inst = *I;
+    for (Instruction *Inst : Inputs) {
       if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
         AllDominate = false;
         break;
@@ -4179,21 +4371,21 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
       // instead of at the end, so that it can be used for other expansions.
       if (IDom == Inst->getParent() &&
           (!BetterPos || !DT.dominates(Inst, BetterPos)))
-        BetterPos = llvm::next(BasicBlock::iterator(Inst));
+        BetterPos = &*std::next(BasicBlock::iterator(Inst));
     }
     if (!AllDominate)
       break;
     if (BetterPos)
-      IP = BetterPos;
+      IP = BetterPos->getIterator();
     else
-      IP = Tentative;
+      IP = Tentative->getIterator();
   }
 
   return IP;
 }
 
-/// AdjustInsertPositionForExpand - Determine an input position which will be
-/// dominated by the operands and which will dominate the result.
+/// Determine an input position which will be dominated by the operands and
+/// which will dominate the result.
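
// Illustrative standalone sketch (not part of the patch) of the hoisting
// loop above, using a toy parent-pointer dominator tree instead of LLVM's
// DominatorTree: climb toward the root while every input still dominates
// the tentative position. Block 0 is the root; all data is hypothetical.
#include <iostream>
#include <vector>

static bool dominates(const std::vector<int> &IDom, int A, int B) {
  // A dominates B iff A appears on B's idom chain (or A is the root).
  for (int X = B;; X = IDom[X]) {
    if (X == A)
      return true;
    if (X == 0)
      return A == 0;
  }
}

static int hoist(const std::vector<int> &IDom, int IP,
                 const std::vector<int> &Inputs) {
  while (IP != 0) {
    int Tentative = IDom[IP];
    bool AllDominate = true;
    for (int In : Inputs)
      AllDominate &= dominates(IDom, In, Tentative);
    if (!AllDominate)
      break;
    IP = Tentative; // keep climbing; the position stays legal
  }
  return IP;
}

int main() {
  // Chain of blocks 0 <- 1 <- 2 <- 3, with the expression's inputs in 1.
  std::vector<int> IDom = {0, 0, 1, 2};
  std::cout << hoist(IDom, 3, {1}) << '\n'; // hoists the insert point to 1
}
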
 BasicBlock::iterator
 LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
                                            const LSRFixup &LF,
@@ -4217,9 +4409,7 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
   }
   // The expansion must also be dominated by the increment positions of any
   // loops for which it is using post-inc mode.
-  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
-       E = LF.PostIncLoops.end(); I != E; ++I) {
-    const Loop *PIL = *I;
+  for (const Loop *PIL : LF.PostIncLoops) {
     if (PIL == L) continue;
 
     // Be dominated by the loop exit.
@@ -4233,7 +4423,7 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
     }
   }
 
-  assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP)
+  assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
          && !isa<DbgInfoIntrinsic>(LowestIP) &&
          "Insertion point must be a normal instruction");
@@ -4245,7 +4435,7 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
   while (isa<PHINode>(IP)) ++IP;
 
   // Ignore landingpad instructions.
-  while (isa<LandingPadInst>(IP)) ++IP;
+  while (IP->isEHPad()) ++IP;
 
   // Ignore debug intrinsics.
   while (isa<DbgInfoIntrinsic>(IP)) ++IP;
@@ -4253,19 +4443,22 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
   // Set IP below instructions recently inserted by SCEVExpander. This keeps the
   // IP consistent across expansions and allows the previously inserted
   // instructions to be reused by subsequent expansion.
-  while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;
+  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
+    ++IP;
 
   return IP;
 }
 
-/// Expand - Emit instructions for the leading candidate expression for this
-/// LSRUse (this is called "expanding").
+/// Emit instructions for the leading candidate expression for this LSRUse
+/// (this is called "expanding").
 Value *LSRInstance::Expand(const LSRFixup &LF,
                            const Formula &F,
                            BasicBlock::iterator IP,
                            SCEVExpander &Rewriter,
                            SmallVectorImpl<WeakVH> &DeadInsts) const {
   const LSRUse &LU = Uses[LF.LUIdx];
+  if (LU.RigidFormula)
+    return LF.OperandValToReplace;
 
   // Determine an input position which will be dominated by the operands and
   // which will dominate the result.
@@ -4292,9 +4485,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   SmallVector<const SCEV *, 8> Ops;
 
   // Expand the BaseRegs portion.
-  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
-       E = F.BaseRegs.end(); I != E; ++I) {
-    const SCEV *Reg = *I;
+  for (const SCEV *Reg : F.BaseRegs) {
     assert(!Reg->isZero() && "Zero allocated in a base register!");
 
     // If we're expanding for a post-inc user, make the post-inc adjustment.
@@ -4303,11 +4494,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
                                  LF.UserInst, LF.OperandValToReplace,
                                  Loops, SE, DT);
 
-    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
+    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, &*IP)));
   }
 
   // Expand the ScaledReg portion.
-  Value *ICmpScaledV = 0;
+  Value *ICmpScaledV = nullptr;
   if (F.Scale != 0) {
     const SCEV *ScaledS = F.ScaledReg;
@@ -4318,25 +4509,34 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
 
     if (LU.Kind == LSRUse::ICmpZero) {
-      // An interesting way of "folding" with an icmp is to use a negated
-      // scale, which we'll implement by inserting it into the other operand
-      // of the icmp.
-      assert(F.Scale == -1 &&
-             "The only scale supported by ICmpZero uses is -1!");
-      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
+      // Expand ScaleReg as if it were part of the base regs.
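
// Illustrative standalone sketch (not part of the patch) of what expanding
// a formula materializes, per the code above: the sum of the base registers,
// plus Scale * ScaledReg, plus any global base and the constant offset.
// Plain integers stand in for SCEVs; all values are hypothetical.
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

static int64_t expand(const std::vector<int64_t> &BaseRegs, int64_t ScaledReg,
                      int64_t Scale, int64_t BaseGV, int64_t BaseOffset) {
  int64_t V = std::accumulate(BaseRegs.begin(), BaseRegs.end(), int64_t(0));
  if (Scale == 1)
    V += ScaledReg; // a scale of 1 folds the scaled reg into the base sum
  else
    V += Scale * ScaledReg;
  return V + BaseGV + BaseOffset;
}

int main() {
  // base + 4*idx + @gv + 16, with made-up runtime values.
  std::cout << expand({1000}, /*ScaledReg=*/3, /*Scale=*/4, /*BaseGV=*/2000,
                      /*BaseOffset=*/16)
            << '\n'; // 3028
}
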
+      if (F.Scale == 1)
+        Ops.push_back(
+            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, &*IP)));
+      else {
+        // An interesting way of "folding" with an icmp is to use a negated
+        // scale, which we'll implement by inserting it into the other operand
+        // of the icmp.
+        assert(F.Scale == -1 &&
+               "The only scale supported by ICmpZero uses is -1!");
+        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, &*IP);
+      }
     } else {
       // Otherwise just expand the scaled register and an explicit scale,
       // which is expected to be matched as part of the address.
 
       // Flush the operand list to suppress SCEVExpander hoisting address modes.
-      if (!Ops.empty() && LU.Kind == LSRUse::Address) {
-        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+      // Unless the addressing mode will not be folded.
+      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
+          isAMCompletelyFolded(TTI, LU, F)) {
+        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
         Ops.clear();
         Ops.push_back(SE.getUnknown(FullV));
       }
-      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
-      ScaledS = SE.getMulExpr(ScaledS,
-                              SE.getConstant(ScaledS->getType(), F.Scale));
+      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, &*IP));
+      if (F.Scale != 1)
+        ScaledS =
+            SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
       Ops.push_back(ScaledS);
     }
   }
@@ -4345,7 +4545,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   if (F.BaseGV) {
     // Flush the operand list to suppress SCEVExpander hoisting.
     if (!Ops.empty()) {
-      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
       Ops.clear();
       Ops.push_back(SE.getUnknown(FullV));
     }
@@ -4355,7 +4555,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   // Flush the operand list to suppress SCEVExpander hoisting of both folded and
   // unfolded offsets. LSR assumes they both live next to their uses.
   if (!Ops.empty()) {
-    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
     Ops.clear();
     Ops.push_back(SE.getUnknown(FullV));
   }
@@ -4391,7 +4591,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   const SCEV *FullS = Ops.empty() ?
                       SE.getConstant(IntTy, 0) :
                       SE.getAddExpr(Ops);
-  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
+  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, &*IP);
 
   // We're done expanding now, so reset the rewriter.
   Rewriter.clearPostInc();
@@ -4401,7 +4601,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   // form, update the ICmp's other operand.
   if (LU.Kind == LSRUse::ICmpZero) {
     ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
-    DeadInsts.push_back(CI->getOperand(1));
+    DeadInsts.emplace_back(CI->getOperand(1));
     assert(!F.BaseGV && "ICmp does not support folding a global value and "
                         "a scale at the same time!");
     if (F.Scale == -1) {
@@ -4414,7 +4614,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
     }
     CI->setOperand(1, ICmpScaledV);
   } else {
-    assert(F.Scale == 0 &&
+    // A scale of 1 means that the scale has been expanded as part of the
+    // base regs.
+    assert((F.Scale == 0 || F.Scale == 1) &&
            "ICmp does not support folding a global value and "
            "a scale at the same time!");
     Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
@@ -4431,9 +4633,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   return FullV;
 }
 
-/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
-/// of their operands effectively happens in their predecessor blocks, so the
-/// expression may need to be expanded in multiple places.
+/// Helper for Rewrite. PHI nodes are special because the use of their operands
+/// effectively happens in their predecessor blocks, so the expression may need
+/// to be expanded in multiple places.
 void LSRInstance::RewriteForPHI(PHINode *PN,
                                 const LSRFixup &LF,
                                 const Formula &F,
@@ -4455,14 +4657,15 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
         Loop *PNLoop = LI.getLoopFor(Parent);
         if (!PNLoop || Parent != PNLoop->getHeader()) {
           // Split the critical edge.
-          BasicBlock *NewBB = 0;
+          BasicBlock *NewBB = nullptr;
           if (!Parent->isLandingPad()) {
-            NewBB = SplitCriticalEdge(BB, Parent, P,
-                                      /*MergeIdenticalEdges=*/true,
-                                      /*DontDeleteUselessPhis=*/true);
+            NewBB = SplitCriticalEdge(BB, Parent,
+                                      CriticalEdgeSplittingOptions(&DT, &LI)
+                                          .setMergeIdenticalEdges()
+                                          .setDontDeleteUselessPHIs());
           } else {
             SmallVector<BasicBlock *, 2> NewBBs;
-            SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
+            SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
             NewBB = NewBBs[0];
           }
           // If NewBB==NULL, then SplitCriticalEdge refused to split because all
@@ -4484,11 +4687,12 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
         }
 
         std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
-          Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
+          Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
         if (!Pair.second)
           PN->setIncomingValue(i, Pair.first->second);
         else {
-          Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);
+          Value *FullV = Expand(LF, F, BB->getTerminator()->getIterator(),
+                                Rewriter, DeadInsts);
 
           // If this is reuse-by-noop-cast, insert the noop cast.
           Type *OpTy = LF.OperandValToReplace->getType();
@@ -4505,9 +4709,9 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
     }
 }
 
-/// Rewrite - Emit instructions for the leading candidate expression for this
-/// LSRUse (this is called "expanding"), and update the UserInst to reference
-/// the newly expanded value.
+/// Emit instructions for the leading candidate expression for this LSRUse
+/// (this is called "expanding"), and update the UserInst to reference the
+/// newly expanded value.
 void LSRInstance::Rewrite(const LSRFixup &LF,
                           const Formula &F,
                           SCEVExpander &Rewriter,
@@ -4518,7 +4722,8 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
   if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
     RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
   } else {
-    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);
+    Value *FullV =
+        Expand(LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);
 
     // If this is reuse-by-noop-cast, insert the noop cast.
     Type *OpTy = LF.OperandValToReplace->getType();
@@ -4540,11 +4745,11 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
     LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
   }
 
-  DeadInsts.push_back(LF.OperandValToReplace);
+  DeadInsts.emplace_back(LF.OperandValToReplace);
 }
 
-/// ImplementSolution - Rewrite all the fixup locations with new values,
-/// following the chosen solution.
+/// Rewrite all the fixup locations with new values, following the chosen
+/// solution.
 void
 LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                                Pass *P) {
@@ -4552,7 +4757,8 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
   // we can remove them after we are done working.
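
// Illustrative standalone sketch (not part of the patch) of the
// per-predecessor caching in RewriteForPHI above: a PHI operand's use
// happens in its incoming block, so the expression is expanded once per
// distinct predecessor and reused for duplicate edges. Hypothetical data.
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> IncomingBlocks = {"bb1", "bb2", "bb1"};
  std::map<std::string, int> Inserted; // predecessor -> expansion id
  int NextId = 0;
  for (const std::string &BB : IncomingBlocks) {
    auto Pair = Inserted.insert({BB, 0});
    if (Pair.second)
      Pair.first->second = NextId++; // first visit: expand in this block
    std::cout << BB << " uses expansion #" << Pair.first->second << '\n';
  }
  // bb1's duplicate edge shares expansion #0 instead of expanding again.
}
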
   SmallVector<WeakVH, 16> DeadInsts;
 
-  SCEVExpander Rewriter(SE, "lsr");
+  SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
+                        "lsr");
 #ifndef NDEBUG
   Rewriter.setDebugType(DEBUG_TYPE);
 #endif
@@ -4561,25 +4767,20 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
   Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
 
   // Mark phi nodes that terminate chains so the expander tries to reuse them.
-  for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
-       ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
-    if (PHINode *PN = dyn_cast<PHINode>(ChainI->tailUserInst()))
+  for (const IVChain &Chain : IVChainVec) {
+    if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
       Rewriter.setChainedPhi(PN);
   }
 
   // Expand the new value definitions and update the users.
-  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
-       E = Fixups.end(); I != E; ++I) {
-    const LSRFixup &Fixup = *I;
-
+  for (const LSRFixup &Fixup : Fixups) {
     Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);
 
     Changed = true;
   }
 
-  for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
-       ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
-    GenerateIVChain(*ChainI, Rewriter, DeadInsts);
+  for (const IVChain &Chain : IVChainVec) {
+    GenerateIVChain(Chain, Rewriter, DeadInsts);
     Changed = true;
   }
   // Clean up after ourselves. This must be done before deleting any
   // instructions.
@@ -4590,10 +4791,13 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
 }
 
 LSRInstance::LSRInstance(Loop *L, Pass *P)
-  : IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()),
-    DT(P->getAnalysis<DominatorTree>()), LI(P->getAnalysis<LoopInfo>()),
-    TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false),
-    IVIncInsertPos(0) {
+  : IU(P->getAnalysis<IVUsers>()),
+    SE(P->getAnalysis<ScalarEvolutionWrapperPass>().getSE()),
+    DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
+    LI(P->getAnalysis<LoopInfoWrapperPass>().getLoopInfo()),
+    TTI(P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+        *L->getHeader()->getParent())),
+    L(L), Changed(false), IVIncInsertPos(nullptr) {
   // If LoopSimplify form is not available, stay out of trouble.
   if (!L->isLoopSimplifyForm())
     return;
@@ -4604,10 +4808,10 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
   // If there's too much analysis to be done, bail early. We won't be able to
   // model the problem anyway.
   unsigned NumUsers = 0;
-  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
+  for (const IVStrideUse &U : IU) {
     if (++NumUsers > MaxIVUsers) {
-      DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L
-            << "\n");
+      (void)U;
+      DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U << "\n");
       return;
     }
   }
@@ -4630,7 +4834,7 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
 #endif // DEBUG
 
   DEBUG(dbgs() << "\nLSR on loop ";
-        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
+        L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
         dbgs() << ":\n");
 
   // First, perform some low-level loop optimizations.
@@ -4676,14 +4880,10 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
 
 #ifndef NDEBUG
   // Formulae should be legal.
-  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), E = Uses.end();
-       I != E; ++I) {
-    const LSRUse &LU = *I;
-    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
-         JE = LU.Formulae.end();
-         J != JE; ++J)
+  for (const LSRUse &LU : Uses) {
+    for (const Formula &F : LU.Formulae)
       assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
-                        *J) && "Illegal formula generated!");
+                        F) && "Illegal formula generated!");
   };
 #endif
@@ -4697,44 +4897,38 @@
 void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
   OS << "LSR has identified the following interesting factors and types: ";
   bool First = true;
 
-  for (SmallSetVector<int64_t, 8>::const_iterator
-       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
+  for (int64_t Factor : Factors) {
     if (!First) OS << ", ";
     First = false;
-    OS << '*' << *I;
+    OS << '*' << Factor;
   }
 
-  for (SmallSetVector<Type *, 4>::const_iterator
-       I = Types.begin(), E = Types.end(); I != E; ++I) {
+  for (Type *Ty : Types) {
     if (!First) OS << ", ";
     First = false;
-    OS << '(' << **I << ')';
+    OS << '(' << *Ty << ')';
   }
   OS << '\n';
 }
 
 void LSRInstance::print_fixups(raw_ostream &OS) const {
   OS << "LSR is examining the following fixup sites:\n";
-  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
-       E = Fixups.end(); I != E; ++I) {
+  for (const LSRFixup &LF : Fixups) {
     dbgs() << "  ";
-    I->print(OS);
+    LF.print(OS);
     OS << '\n';
   }
 }
 
 void LSRInstance::print_uses(raw_ostream &OS) const {
   OS << "LSR is examining the following uses:\n";
-  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
-       E = Uses.end(); I != E; ++I) {
-    const LSRUse &LU = *I;
+  for (const LSRUse &LU : Uses) {
     dbgs() << "  ";
     LU.print(OS);
     OS << '\n';
-    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
-         JE = LU.Formulae.end(); J != JE; ++J) {
+    for (const Formula &F : LU.Formulae) {
       OS << "    ";
-      J->print(OS);
+      F.print(OS);
       OS << '\n';
     }
   }
@@ -4760,8 +4954,8 @@ public:
   LoopStrengthReduce();
 
 private:
-  bool runOnLoop(Loop *L, LPPassManager &LPM);
-  void getAnalysisUsage(AnalysisUsage &AU) const;
+  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
 };
 
 }
@@ -4769,11 +4963,11 @@ private:
 char LoopStrengthReduce::ID = 0;
 INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                 "Loop Strength Reduction", false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(IVUsers)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
 INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                 "Loop Strength Reduction", false, false)
@@ -4792,22 +4986,25 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
   // many analyses if they are around.
   AU.addPreservedID(LoopSimplifyID);
 
-  AU.addRequired<LoopInfo>();
-  AU.addPreserved<LoopInfo>();
+  AU.addRequired<LoopInfoWrapperPass>();
+  AU.addPreserved<LoopInfoWrapperPass>();
   AU.addRequiredID(LoopSimplifyID);
-  AU.addRequired<DominatorTree>();
-  AU.addPreserved<DominatorTree>();
-  AU.addRequired<ScalarEvolution>();
-  AU.addPreserved<ScalarEvolution>();
+  AU.addRequired<DominatorTreeWrapperPass>();
+  AU.addPreserved<DominatorTreeWrapperPass>();
+  AU.addRequired<ScalarEvolutionWrapperPass>();
+  AU.addPreserved<ScalarEvolutionWrapperPass>();
   // Requiring LoopSimplify a second time here prevents IVUsers from running
   // twice, since LoopSimplify was invalidated by running ScalarEvolution.
   AU.addRequiredID(LoopSimplifyID);
   AU.addRequired<IVUsers>();
   AU.addPreserved<IVUsers>();
-  AU.addRequired<TargetTransformInfo>();
+  AU.addRequired<TargetTransformInfoWrapperPass>();
 }
 
 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
+  if (skipOptnoneFunction(L))
+    return false;
+
   bool Changed = false;
 
   // Run the main LSR transformation.
@@ -4817,14 +5014,16 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
   Changed |= DeleteDeadPHIs(L->getHeader());
   if (EnablePhiElim && L->isLoopSimplifyForm()) {
     SmallVector<WeakVH, 16> DeadInsts;
-    SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
+    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
+    SCEVExpander Rewriter(getAnalysis<ScalarEvolutionWrapperPass>().getSE(), DL,
+                          "lsr");
 #ifndef NDEBUG
     Rewriter.setDebugType(DEBUG_TYPE);
 #endif
-    unsigned numFolded =
-        Rewriter.replaceCongruentIVs(L, &getAnalysis<DominatorTree>(),
-                                     DeadInsts,
-                                     &getAnalysis<TargetTransformInfo>());
+    unsigned numFolded = Rewriter.replaceCongruentIVs(
+        L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts,
+        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+            *L->getHeader()->getParent()));
     if (numFolded) {
       Changed = true;
       DeleteTriviallyDeadInstructions(DeadInsts);
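
// Illustrative standalone sketch (not part of the patch) of the congruence
// folding replaceCongruentIVs performs above: induction variables with
// identical start and step are interchangeable, so duplicates can be
// redirected to one canonical IV and deleted. Toy model, hypothetical names.
#include <iostream>
#include <map>
#include <utility>
#include <vector>

struct IV {
  long Start, Step;
  const char *Name;
};

int main() {
  std::vector<IV> IVs = {{0, 1, "i"}, {0, 1, "i.dup"}, {0, 4, "ofs"}};
  std::map<std::pair<long, long>, const char *> Canonical;
  unsigned NumFolded = 0;
  for (const IV &V : IVs) {
    auto Ins = Canonical.insert({{V.Start, V.Step}, V.Name});
    if (!Ins.second) { // same (start, step): congruent with an earlier IV
      std::cout << V.Name << " folded into " << Ins.first->second << '\n';
      ++NumFolded;
    }
  }
  std::cout << NumFolded << " IV(s) folded\n";
}
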