X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FLoopStrengthReduce.cpp;h=13e4fceec664fd672e4828ec63b9ea2ef931658f;hp=7b1a340156d172bc66061df7571e089b7669b466;hb=d761cc1dfa7bb55c0c995dd409025147a51c1258;hpb=5219f86a0bab11dd6895a31653e371e9871a6734 diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 7b1a340156d..13e4fceec66 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -37,8 +37,8 @@ // // TODO: Handle multiple loops at a time. // -// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr -// instead of a GlobalValue? +// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead +// of a GlobalValue? // // TODO: When truncation is free, truncate ICmp users' operands to make it a // smaller encoding (on x86 at least). @@ -53,37 +53,54 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "loop-reduce" #include "llvm/Transforms/Scalar.h" -#include "llvm/Constants.h" -#include "llvm/Instructions.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/DerivedTypes.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallBitVector.h" #include "llvm/Analysis/IVUsers.h" -#include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" -#include "llvm/Assembly/Writer.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Local.h" -#include "llvm/ADT/SmallBitVector.h" -#include "llvm/ADT/SetVector.h" -#include "llvm/ADT/DenseSet.h" -#include "llvm/Support/Debug.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/ValueHandle.h" #include "llvm/Support/CommandLine.h" -#include "llvm/Support/ValueHandle.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetLowering.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Local.h" #include using namespace llvm; -namespace llvm { -cl::opt EnableNested( - "enable-lsr-nested", cl::Hidden, cl::desc("Enable LSR on nested loops")); +#define DEBUG_TYPE "loop-reduce" -cl::opt EnableRetry( - "enable-lsr-retry", cl::Hidden, cl::desc("Enable LSR retry")); -} +/// MaxIVUsers is an arbitrary threshold that provides an early opportunitiy for +/// bail out. This threshold is far beyond the number of users that LSR can +/// conceivably solve, so it should not affect generated code, but catches the +/// worst cases before LSR burns too much compile time and stack space. +static const unsigned MaxIVUsers = 200; + +// Temporary flag to cleanup congruent phis after LSR phi expansion. +// It's currently disabled until we can determine whether it's truly useful or +// not. The flag should be removed after the v3.0 release. +// This is now needed for ivchains. +static cl::opt EnablePhiElim( + "enable-lsr-phielim", cl::Hidden, cl::init(true), + cl::desc("Enable LSR phi elimination")); + +#ifndef NDEBUG +// Stress test IV chain generation. 
+static cl::opt StressIVChain( + "stress-ivchain", cl::Hidden, cl::init(false), + cl::desc("Stress test LSR IV chains")); +#else +static bool StressIVChain = false; +#endif namespace { @@ -106,9 +123,11 @@ void RegSortData::print(raw_ostream &OS) const { OS << "[NumUses=" << UsedByIndices.count() << ']'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void RegSortData::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { @@ -206,16 +225,24 @@ namespace { /// computing satisfying a use. It may include broken-out immediates and scaled /// registers. struct Formula { - /// AM - This is used to represent complex addressing, as well as other kinds - /// of interesting uses. - TargetLowering::AddrMode AM; + /// Global base address used for complex addressing. + GlobalValue *BaseGV; + + /// Base offset for complex addressing. + int64_t BaseOffset; + + /// Whether any complex addressing has a base register. + bool HasBaseReg; + + /// The scale of any complex addressing. + int64_t Scale; /// BaseRegs - The list of "base" registers for this use. When this is - /// non-empty, AM.HasBaseReg should be set to true. - SmallVector BaseRegs; + /// non-empty, + SmallVector BaseRegs; /// ScaledReg - The 'scaled' register for this use. This should be non-null - /// when AM.Scale is not zero. + /// when Scale is not zero. const SCEV *ScaledReg; /// UnfoldedOffset - An additional constant offset which added near the @@ -223,11 +250,13 @@ struct Formula { /// live in an add immediate field rather than a register. int64_t UnfoldedOffset; - Formula() : ScaledReg(0), UnfoldedOffset(0) {} + Formula() + : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0), + ScaledReg(nullptr), UnfoldedOffset(0) {} void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE); - unsigned getNumRegs() const; + size_t getNumRegs() const; Type *getType() const; void DeleteBaseReg(const SCEV *&S); @@ -309,20 +338,20 @@ void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { const SCEV *Sum = SE.getAddExpr(Good); if (!Sum->isZero()) BaseRegs.push_back(Sum); - AM.HasBaseReg = true; + HasBaseReg = true; } if (!Bad.empty()) { const SCEV *Sum = SE.getAddExpr(Bad); if (!Sum->isZero()) BaseRegs.push_back(Sum); - AM.HasBaseReg = true; + HasBaseReg = true; } } /// getNumRegs - Return the total number of register operands used by this /// formula. This does not include register uses implied by non-constant /// addrec strides. -unsigned Formula::getNumRegs() const { +size_t Formula::getNumRegs() const { return !!ScaledReg + BaseRegs.size(); } @@ -331,8 +360,8 @@ unsigned Formula::getNumRegs() const { Type *Formula::getType() const { return !BaseRegs.empty() ? BaseRegs.front()->getType() : ScaledReg ? ScaledReg->getType() : - AM.BaseGV ? AM.BaseGV->getType() : - 0; + BaseGV ? BaseGV->getType() : + nullptr; } /// DeleteBaseReg - Delete the given base reg from the BaseRegs list. 
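The Formula fields introduced in this hunk spell out the addressing mode that the old TargetLowering::AddrMode bundled together: BaseGV + base register(s) + Scale*ScaledReg + BaseOffset, with getNumRegs() counting one register per base register plus one for a scaled register. A minimal standalone sketch of that decomposition, using illustrative names rather than LLVM's types:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for LSR's Formula: an address is modeled as
//   BaseGV + sum(BaseRegs) + Scale * ScaledReg + BaseOffset
struct ToyFormula {
  std::string BaseGV;                // symbolic global base, empty if none
  int64_t BaseOffset = 0;            // constant displacement
  bool HasBaseReg = false;           // set when BaseRegs is non-empty
  int64_t Scale = 0;                 // 0 means no scaled register
  std::vector<std::string> BaseRegs; // unscaled register operands
  std::string ScaledReg;             // meaningful only when Scale != 0

  // Same counting rule as Formula::getNumRegs(): one register per base
  // register, plus one if a scaled register is present.
  std::size_t getNumRegs() const {
    return (ScaledReg.empty() ? 0 : 1) + BaseRegs.size();
  }
};

int main() {
  // Roughly "gv + r1 + 4*r2 + 16".
  ToyFormula F;
  F.BaseGV = "gv";
  F.BaseOffset = 16;
  F.BaseRegs = {"r1"};
  F.HasBaseReg = true;
  F.Scale = 4;
  F.ScaledReg = "r2";
  std::cout << "registers used: " << F.getNumRegs() << "\n"; // prints 2
  return 0;
}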
@@ -364,29 +393,29 @@ bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx, void Formula::print(raw_ostream &OS) const { bool First = true; - if (AM.BaseGV) { + if (BaseGV) { if (!First) OS << " + "; else First = false; - WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false); + BaseGV->printAsOperand(OS, /*PrintType=*/false); } - if (AM.BaseOffs != 0) { + if (BaseOffset != 0) { if (!First) OS << " + "; else First = false; - OS << AM.BaseOffs; + OS << BaseOffset; } for (SmallVectorImpl::const_iterator I = BaseRegs.begin(), E = BaseRegs.end(); I != E; ++I) { if (!First) OS << " + "; else First = false; OS << "reg(" << **I << ')'; } - if (AM.HasBaseReg && BaseRegs.empty()) { + if (HasBaseReg && BaseRegs.empty()) { if (!First) OS << " + "; else First = false; OS << "**error: HasBaseReg**"; - } else if (!AM.HasBaseReg && !BaseRegs.empty()) { + } else if (!HasBaseReg && !BaseRegs.empty()) { if (!First) OS << " + "; else First = false; OS << "**error: !HasBaseReg**"; } - if (AM.Scale != 0) { + if (Scale != 0) { if (!First) OS << " + "; else First = false; - OS << AM.Scale << "*reg("; + OS << Scale << "*reg("; if (ScaledReg) OS << *ScaledReg; else @@ -394,14 +423,16 @@ void Formula::print(raw_ostream &OS) const { OS << ')'; } if (UnfoldedOffset != 0) { - if (!First) OS << " + "; else First = false; + if (!First) OS << " + "; OS << "imm(" << UnfoldedOffset << ')'; } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Formula::dump() const { print(errs()); errs() << '\n'; } +#endif /// isAddRecSExtable - Return true if the given addrec can be sign-extended /// without changing its value. @@ -457,11 +488,11 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, // Check for a division of a constant by a constant. if (const SCEVConstant *C = dyn_cast(LHS)) { if (!RC) - return 0; + return nullptr; const APInt &LA = C->getValue()->getValue(); const APInt &RA = RC->getValue()->getValue(); if (LA.srem(RA) != 0) - return 0; + return nullptr; return SE.getConstant(LA.sdiv(RA)); } @@ -470,16 +501,16 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) { const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, IgnoreSignificantBits); - if (!Step) return 0; + if (!Step) return nullptr; const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, IgnoreSignificantBits); - if (!Start) return 0; + if (!Start) return nullptr; // FlagNW is independent of the start value, step direction, and is // preserved with smaller magnitude steps. // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); } - return 0; + return nullptr; } // Distribute the sdiv over add operands, if the add doesn't overflow. @@ -490,12 +521,12 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, I != E; ++I) { const SCEV *Op = getExactSDiv(*I, RHS, SE, IgnoreSignificantBits); - if (!Op) return 0; + if (!Op) return nullptr; Ops.push_back(Op); } return SE.getAddExpr(Ops); } - return 0; + return nullptr; } // Check for a multiply operand that we can pull RHS out of. @@ -514,13 +545,13 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, } Ops.push_back(S); } - return Found ? SE.getMulExpr(Ops) : 0; + return Found ? SE.getMulExpr(Ops) : nullptr; } - return 0; + return nullptr; } // Otherwise we don't know. 
- return 0; + return nullptr; } /// ExtractImmediate - If S involves the addition of a constant integer value, @@ -574,7 +605,7 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { SCEV::FlagAnyWrap); return Result; } - return 0; + return nullptr; } /// isAddressUse - Returns true if the specified instruction is using the @@ -630,6 +661,90 @@ static Type *getAccessType(const Instruction *Inst) { return AccessTy; } +/// isExistingPhi - Return true if this AddRec is already a phi in its loop. +static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { + for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); + PHINode *PN = dyn_cast(I); ++I) { + if (SE.isSCEVable(PN->getType()) && + (SE.getEffectiveSCEVType(PN->getType()) == + SE.getEffectiveSCEVType(AR->getType())) && + SE.getSCEV(PN) == AR) + return true; + } + return false; +} + +/// Check if expanding this expression is likely to incur significant cost. This +/// is tricky because SCEV doesn't track which expressions are actually computed +/// by the current IR. +/// +/// We currently allow expansion of IV increments that involve adds, +/// multiplication by constants, and AddRecs from existing phis. +/// +/// TODO: Allow UDivExpr if we can find an existing IV increment that is an +/// obvious multiple of the UDivExpr. +static bool isHighCostExpansion(const SCEV *S, + SmallPtrSet &Processed, + ScalarEvolution &SE) { + // Zero/One operand expressions + switch (S->getSCEVType()) { + case scUnknown: + case scConstant: + return false; + case scTruncate: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + case scZeroExtend: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + case scSignExtend: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + } + + if (!Processed.insert(S)) + return false; + + if (const SCEVAddExpr *Add = dyn_cast(S)) { + for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); + I != E; ++I) { + if (isHighCostExpansion(*I, Processed, SE)) + return true; + } + return false; + } + + if (const SCEVMulExpr *Mul = dyn_cast(S)) { + if (Mul->getNumOperands() == 2) { + // Multiplication by a constant is ok + if (isa(Mul->getOperand(0))) + return isHighCostExpansion(Mul->getOperand(1), Processed, SE); + + // If we have the value of one operand, check if an existing + // multiplication already generates this expression. + if (const SCEVUnknown *U = dyn_cast(Mul->getOperand(1))) { + Value *UVal = U->getValue(); + for (User *UR : UVal->users()) { + // If U is a constant, it may be used by a ConstantExpr. + Instruction *UI = dyn_cast(UR); + if (UI && UI->getOpcode() == Instruction::Mul && + SE.isSCEVable(UI->getType())) { + return SE.getSCEV(UI) == Mul; + } + } + } + } + } + + if (const SCEVAddRecExpr *AR = dyn_cast(S)) { + if (isExistingPhi(AR, SE)) + return false; + } + + // Fow now, consider any other type of expression (div/mul/min/max) high cost. + return true; +} + /// DeleteTriviallyDeadInstructions - If any of the instructions is the /// specified set are trivially dead, delete them and see if this makes any of /// their operands subsequently dead. 
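The isHighCostExpansion helper added above classifies a SCEV as cheap only if it is built from constants, unknowns, adds of cheap parts, multiplies by a constant, or AddRecs that already exist as phis; everything else (notably division) is treated as expensive to materialize. A standalone sketch of the same recursion over a toy expression type, with the Processed set guarding shared subexpressions (node kinds and names are invented for illustration):

#include <iostream>
#include <set>
#include <vector>

// Toy expression tree standing in for SCEV.
struct Expr {
  enum Kind { Constant, Unknown, Add, Mul, Div } K;
  std::vector<const Expr *> Ops;
  explicit Expr(Kind K, std::vector<const Expr *> Ops = {})
      : K(K), Ops(std::move(Ops)) {}
};

// Mirrors the shape of isHighCostExpansion: leaves are cheap, an add is cheap
// if every operand is cheap, a two-operand multiply is cheap when its first
// operand is a constant, and anything else (e.g. division) counts as high
// cost. A node already in Processed has been accounted for and is skipped.
static bool isHighCost(const Expr *E, std::set<const Expr *> &Processed) {
  switch (E->K) {
  case Expr::Constant:
  case Expr::Unknown:
    return false;
  default:
    break;
  }
  if (!Processed.insert(E).second)
    return false;
  if (E->K == Expr::Add) {
    for (const Expr *Op : E->Ops)
      if (isHighCost(Op, Processed))
        return true;
    return false;
  }
  if (E->K == Expr::Mul && E->Ops.size() == 2 &&
      E->Ops[0]->K == Expr::Constant)
    return isHighCost(E->Ops[1], Processed);
  return true;
}

int main() {
  Expr C(Expr::Constant), X(Expr::Unknown);
  Expr Scaled(Expr::Mul, {&C, &X});   // 4 * x   -> cheap
  Expr Sum(Expr::Add, {&Scaled, &C}); // 4*x + 4 -> cheap
  Expr Quot(Expr::Div, {&X, &C});     // x / 4   -> high cost
  std::set<const Expr *> Seen;
  std::cout << isHighCost(&Sum, Seen) << " " << isHighCost(&Quot, Seen)
            << "\n"; // prints "0 1"
  return 0;
}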
@@ -638,14 +753,15 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { bool Changed = false; while (!DeadInsts.empty()) { - Instruction *I = dyn_cast_or_null(&*DeadInsts.pop_back_val()); + Value *V = DeadInsts.pop_back_val(); + Instruction *I = dyn_cast_or_null(V); - if (I == 0 || !isInstructionTriviallyDead(I)) + if (!I || !isInstructionTriviallyDead(I)) continue; for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) if (Instruction *U = dyn_cast(*OI)) { - *OI = 0; + *OI = nullptr; if (U->use_empty()) DeadInsts.push_back(U); } @@ -657,6 +773,16 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { return Changed; } +namespace { +class LSRUse; +} +// Check if it is legal to fold 2 base registers. +static bool isLegal2RegAMUse(const TargetTransformInfo &TTI, const LSRUse &LU, + const Formula &F); +// Get the cost of the scaling factor used in F for LU. +static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F); + namespace { /// Cost - This class is used to measure and compare candidate formulae. @@ -669,23 +795,24 @@ class Cost { unsigned NumBaseAdds; unsigned ImmCost; unsigned SetupCost; + unsigned ScaleCost; public: Cost() : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0), - SetupCost(0) {} + SetupCost(0), ScaleCost(0) {} bool operator<(const Cost &Other) const; - void Loose(); + void Lose(); #ifndef NDEBUG // Once any of the metrics loses, they must all remain losers. bool isValid() { return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds - | ImmCost | SetupCost) != ~0u) + | ImmCost | SetupCost | ScaleCost) != ~0u) || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds - & ImmCost & SetupCost) == ~0u); + & ImmCost & SetupCost & ScaleCost) == ~0u); } #endif @@ -694,12 +821,15 @@ public: return NumRegs == ~0u; } - void RateFormula(const Formula &F, + void RateFormula(const TargetTransformInfo &TTI, + const Formula &F, SmallPtrSet &Regs, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, - ScalarEvolution &SE, DominatorTree &DT); + ScalarEvolution &SE, DominatorTree &DT, + const LSRUse &LU, + SmallPtrSet *LoserRegs = nullptr); void print(raw_ostream &OS) const; void dump() const; @@ -712,7 +842,8 @@ private: void RatePrimaryRegister(const SCEV *Reg, SmallPtrSet &Regs, const Loop *L, - ScalarEvolution &SE, DominatorTree &DT); + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs); }; } @@ -723,41 +854,20 @@ void Cost::RateRegister(const SCEV *Reg, const Loop *L, ScalarEvolution &SE, DominatorTree &DT) { if (const SCEVAddRecExpr *AR = dyn_cast(Reg)) { - if (AR->getLoop() == L) - AddRecCost += 1; /// TODO: This should be a function of the stride. - // If this is an addrec for another loop, don't second-guess its addrec phi // nodes. LSR isn't currently smart enough to reason about more than one - // loop at a time. LSR has either already run on inner loops, will not run - // on other loops, and cannot be expected to change sibling loops. If the - // AddRec exists, consider it's register free and leave it alone. Otherwise, - // do not consider this formula at all. - // FIXME: why do we need to generate such fomulae? 
- else if (!EnableNested || L->contains(AR->getLoop()) || - (!AR->getLoop()->contains(L) && - DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) { - for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); - PHINode *PN = dyn_cast(I); ++I) { - if (SE.isSCEVable(PN->getType()) && - (SE.getEffectiveSCEVType(PN->getType()) == - SE.getEffectiveSCEVType(AR->getType())) && - SE.getSCEV(PN) == AR) - return; - } - if (!EnableNested) { - Loose(); + // loop at a time. LSR has already run on inner loops, will not run on outer + // loops, and cannot be expected to change sibling loops. + if (AR->getLoop() != L) { + // If the AddRec exists, consider it's register free and leave it alone. + if (isExistingPhi(AR, SE)) return; - } - // If this isn't one of the addrecs that the loop already has, it - // would require a costly new phi and add. TODO: This isn't - // precisely modeled right now. - ++NumBaseAdds; - if (!Regs.count(AR->getStart())) { - RateRegister(AR->getStart(), Regs, L, SE, DT); - if (isLoser()) - return; - } + + // Otherwise, do not consider this formula at all. + Lose(); + return; } + AddRecCost += 1; /// TODO: This should be a function of the stride. // Add the step value register, if it needs one. // TODO: The non-affine case isn't precisely modeled here. @@ -785,28 +895,40 @@ void Cost::RateRegister(const SCEV *Reg, } /// RatePrimaryRegister - Record this register in the set. If we haven't seen it -/// before, rate it. +/// before, rate it. Optional LoserRegs provides a way to declare any formula +/// that refers to one of those regs an instant loser. void Cost::RatePrimaryRegister(const SCEV *Reg, SmallPtrSet &Regs, const Loop *L, - ScalarEvolution &SE, DominatorTree &DT) { - if (Regs.insert(Reg)) + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs) { + if (LoserRegs && LoserRegs->count(Reg)) { + Lose(); + return; + } + if (Regs.insert(Reg)) { RateRegister(Reg, Regs, L, SE, DT); + if (LoserRegs && isLoser()) + LoserRegs->insert(Reg); + } } -void Cost::RateFormula(const Formula &F, +void Cost::RateFormula(const TargetTransformInfo &TTI, + const Formula &F, SmallPtrSet &Regs, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, - ScalarEvolution &SE, DominatorTree &DT) { + ScalarEvolution &SE, DominatorTree &DT, + const LSRUse &LU, + SmallPtrSet *LoserRegs) { // Tally up the registers. if (const SCEV *ScaledReg = F.ScaledReg) { if (VisitedRegs.count(ScaledReg)) { - Loose(); + Lose(); return; } - RatePrimaryRegister(ScaledReg, Regs, L, SE, DT); + RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs); if (isLoser()) return; } @@ -814,10 +936,10 @@ void Cost::RateFormula(const Formula &F, E = F.BaseRegs.end(); I != E; ++I) { const SCEV *BaseReg = *I; if (VisitedRegs.count(BaseReg)) { - Loose(); + Lose(); return; } - RatePrimaryRegister(BaseReg, Regs, L, SE, DT); + RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs); if (isLoser()) return; } @@ -825,13 +947,18 @@ void Cost::RateFormula(const Formula &F, // Determine how many (unfolded) adds we'll need inside the loop. size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0); if (NumBaseParts > 1) - NumBaseAdds += NumBaseParts - 1; + // Do not count the base and a possible second register if the target + // allows to fold 2 registers. + NumBaseAdds += NumBaseParts - (1 + isLegal2RegAMUse(TTI, LU, F)); + + // Accumulate non-free scaling amounts. + ScaleCost += getScalingFactorCost(TTI, LU, F); // Tally up the non-zero immediates. 
for (SmallVectorImpl::const_iterator I = Offsets.begin(), E = Offsets.end(); I != E; ++I) { - int64_t Offset = (uint64_t)*I + F.AM.BaseOffs; - if (F.AM.BaseGV) + int64_t Offset = (uint64_t)*I + F.BaseOffset; + if (F.BaseGV) ImmCost += 64; // Handle symbolic values conservatively. // TODO: This should probably be the pointer size. else if (Offset != 0) @@ -840,31 +967,24 @@ void Cost::RateFormula(const Formula &F, assert(isValid() && "invalid cost"); } -/// Loose - Set this cost to a losing value. -void Cost::Loose() { +/// Lose - Set this cost to a losing value. +void Cost::Lose() { NumRegs = ~0u; AddRecCost = ~0u; NumIVMuls = ~0u; NumBaseAdds = ~0u; ImmCost = ~0u; SetupCost = ~0u; + ScaleCost = ~0u; } /// operator< - Choose the lower cost. bool Cost::operator<(const Cost &Other) const { - if (NumRegs != Other.NumRegs) - return NumRegs < Other.NumRegs; - if (AddRecCost != Other.AddRecCost) - return AddRecCost < Other.AddRecCost; - if (NumIVMuls != Other.NumIVMuls) - return NumIVMuls < Other.NumIVMuls; - if (NumBaseAdds != Other.NumBaseAdds) - return NumBaseAdds < Other.NumBaseAdds; - if (ImmCost != Other.ImmCost) - return ImmCost < Other.ImmCost; - if (SetupCost != Other.SetupCost) - return SetupCost < Other.SetupCost; - return false; + return std::tie(NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost, + ImmCost, SetupCost) < + std::tie(Other.NumRegs, Other.AddRecCost, Other.NumIVMuls, + Other.NumBaseAdds, Other.ScaleCost, Other.ImmCost, + Other.SetupCost); } void Cost::print(raw_ostream &OS) const { @@ -876,15 +996,19 @@ void Cost::print(raw_ostream &OS) const { if (NumBaseAdds != 0) OS << ", plus " << NumBaseAdds << " base add" << (NumBaseAdds == 1 ? "" : "s"); + if (ScaleCost != 0) + OS << ", plus " << ScaleCost << " scale cost"; if (ImmCost != 0) OS << ", plus " << ImmCost << " imm cost"; if (SetupCost != 0) OS << ", plus " << SetupCost << " setup cost"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Cost::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { @@ -924,7 +1048,8 @@ struct LSRFixup { } LSRFixup::LSRFixup() - : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} + : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)), + Offset(0) {} /// isUseFullyOutsideLoop - Test whether this fixup always uses its /// value outside of the given loop. @@ -946,19 +1071,19 @@ void LSRFixup::print(raw_ostream &OS) const { // Store is common and interesting enough to be worth special-casing. 
if (StoreInst *Store = dyn_cast(UserInst)) { OS << "store "; - WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); + Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); } else if (UserInst->getType()->isVoidTy()) OS << UserInst->getOpcodeName(); else - WriteAsOperand(OS, UserInst, /*PrintType=*/false); + UserInst->printAsOperand(OS, /*PrintType=*/false); OS << ", OperandValToReplace="; - WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); + OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(), E = PostIncLoops.end(); I != E; ++I) { OS << ", PostIncLoop="; - WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false); + (*I)->getHeader()->printAsOperand(OS, /*PrintType=*/false); } if (LUIdx != ~size_t(0)) @@ -968,37 +1093,35 @@ void LSRFixup::print(raw_ostream &OS) const { OS << ", Offset=" << Offset; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRFixup::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { /// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding /// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. struct UniquifierDenseMapInfo { - static SmallVector getEmptyKey() { - SmallVector V; + static SmallVector getEmptyKey() { + SmallVector V; V.push_back(reinterpret_cast(-1)); return V; } - static SmallVector getTombstoneKey() { - SmallVector V; + static SmallVector getTombstoneKey() { + SmallVector V; V.push_back(reinterpret_cast(-2)); return V; } - static unsigned getHashValue(const SmallVector &V) { - unsigned Result = 0; - for (SmallVectorImpl::const_iterator I = V.begin(), - E = V.end(); I != E; ++I) - Result ^= DenseMapInfo::getHashValue(*I); - return Result; + static unsigned getHashValue(const SmallVector &V) { + return static_cast(hash_combine_range(V.begin(), V.end())); } - static bool isEqual(const SmallVector &LHS, - const SmallVector &RHS) { + static bool isEqual(const SmallVector &LHS, + const SmallVector &RHS) { return LHS == RHS; } }; @@ -1009,7 +1132,7 @@ struct UniquifierDenseMapInfo { /// the user itself, and information about how the use may be satisfied. /// TODO: Represent multiple users of the same expression in common? class LSRUse { - DenseSet, UniquifierDenseMapInfo> Uniquifier; + DenseSet, UniquifierDenseMapInfo> Uniquifier; public: /// KindType - An enum for a kind of use, indicating what types of @@ -1022,6 +1145,8 @@ public: // TODO: Add a generic icmp too? }; + typedef PointerIntPair SCEVUseKindPair; + KindType Kind; Type *AccessTy; @@ -1034,6 +1159,13 @@ public: /// may be used. bool AllFixupsOutsideLoop; + /// RigidFormula is set to true to guarantee that this use will be associated + /// with a single formula--the one that initially matched. Some SCEV + /// expressions cannot be expanded. This allows LSR to consider the registers + /// used by those expressions without the need to expand them later after + /// changing the formula. + bool RigidFormula; + /// WidestFixupType - This records the widest use type for any fixup using /// this LSRUse. 
FindUseWithSimilarFormula can't consider uses with different /// max fixup widths to be equivalent, because the narrower one may be relying @@ -1052,7 +1184,8 @@ public: MinOffset(INT64_MAX), MaxOffset(INT64_MIN), AllFixupsOutsideLoop(true), - WidestFixupType(0) {} + RigidFormula(false), + WidestFixupType(nullptr) {} bool HasFormulaWithSameRegs(const Formula &F) const; bool InsertFormula(const Formula &F); @@ -1068,7 +1201,7 @@ public: /// HasFormula - Test whether this use as a formula which has the same /// registers as the given formula. bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { - SmallVector Key = F.BaseRegs; + SmallVector Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. std::sort(Key.begin(), Key.end()); @@ -1078,7 +1211,10 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { /// InsertFormula - If the given formula has not yet been inserted, add it to /// the list, and return true. Return false otherwise. bool LSRUse::InsertFormula(const Formula &F) { - SmallVector Key = F.BaseRegs; + if (!Formulae.empty() && RigidFormula) + return false; + + SmallVector Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. std::sort(Key.begin(), Key.end()); @@ -1099,7 +1235,6 @@ bool LSRUse::InsertFormula(const Formula &F) { Formulae.push_back(F); // Record registers now being used by this use. - if (F.ScaledReg) Regs.insert(F.ScaledReg); Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); return true; @@ -1110,7 +1245,6 @@ void LSRUse::DeleteFormula(Formula &F) { if (&F != &Formulae.back()) std::swap(F, Formulae.back()); Formulae.pop_back(); - assert(!Formulae.empty() && "LSRUse has no formulae left!"); } /// RecomputeRegs - Recompute the Regs field, and update RegUses. @@ -1150,7 +1284,7 @@ void LSRUse::print(raw_ostream &OS) const { for (SmallVectorImpl::const_iterator I = Offsets.begin(), E = Offsets.end(); I != E; ++I) { OS << *I; - if (llvm::next(I) != E) + if (std::next(I) != E) OS << ','; } OS << '}'; @@ -1162,163 +1296,265 @@ void LSRUse::print(raw_ostream &OS) const { OS << ", widest fixup type: " << *WidestFixupType; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRUse::dump() const { print(errs()); errs() << '\n'; } +#endif /// isLegalUse - Test whether the use described by AM is "legal", meaning it can /// be completely folded into the user instruction at isel time. This includes /// address-mode folding and special icmp tricks. -static bool isLegalUse(const TargetLowering::AddrMode &AM, - LSRUse::KindType Kind, Type *AccessTy, - const TargetLowering *TLI) { +static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind, + Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, + bool HasBaseReg, int64_t Scale) { switch (Kind) { case LSRUse::Address: - // If we have low-level target information, ask the target if it can - // completely fold this address. - if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy); + return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); // Otherwise, just guess that reg+reg addressing is legal. - return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1; + //return ; case LSRUse::ICmpZero: // There's not even a target hook for querying whether it would be legal to // fold a GV into an ICmp. 
- if (AM.BaseGV) + if (BaseGV) return false; // ICmp only has two operands; don't allow more than two non-trivial parts. - if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0) + if (Scale != 0 && HasBaseReg && BaseOffset != 0) return false; // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by // putting the scaled register in the other operand of the icmp. - if (AM.Scale != 0 && AM.Scale != -1) + if (Scale != 0 && Scale != -1) return false; // If we have low-level target information, ask the target if it can fold an // integer immediate on an icmp. - if (AM.BaseOffs != 0) { - if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs); - return false; + if (BaseOffset != 0) { + // We have one of: + // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset + // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset + // Offs is the ICmp immediate. + if (Scale == 0) + // The cast does the right thing with INT64_MIN. + BaseOffset = -(uint64_t)BaseOffset; + return TTI.isLegalICmpImmediate(BaseOffset); } + // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg return true; case LSRUse::Basic: // Only handle single-register values. - return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0; + return !BaseGV && Scale == 0 && BaseOffset == 0; case LSRUse::Special: - // Only handle -1 scales, or no scale. - return AM.Scale == 0 || AM.Scale == -1; + // Special case Basic to handle -1 scales. + return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0; } - return false; + llvm_unreachable("Invalid LSRUse Kind!"); } -static bool isLegalUse(TargetLowering::AddrMode AM, - int64_t MinOffset, int64_t MaxOffset, - LSRUse::KindType Kind, Type *AccessTy, - const TargetLowering *TLI) { +static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, + int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, + GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, + int64_t Scale) { // Check for overflow. - if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) != + if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) != (MinOffset > 0)) return false; - AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset; - if (isLegalUse(AM, Kind, AccessTy, TLI)) { - AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset; - // Check for overflow. - if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) != - (MaxOffset > 0)) - return false; - AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset; - return isLegalUse(AM, Kind, AccessTy, TLI); + MinOffset = (uint64_t)BaseOffset + MinOffset; + if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) != + (MaxOffset > 0)) + return false; + MaxOffset = (uint64_t)BaseOffset + MaxOffset; + + return isLegalUse(TTI, Kind, AccessTy, BaseGV, MinOffset, HasBaseReg, + Scale) && + isLegalUse(TTI, Kind, AccessTy, BaseGV, MaxOffset, HasBaseReg, Scale); +} + +static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, + int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, + const Formula &F) { + return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, + F.BaseOffset, F.HasBaseReg, F.Scale); +} + +static bool isLegal2RegAMUse(const TargetTransformInfo &TTI, const LSRUse &LU, + const Formula &F) { + // If F is used as an Addressing Mode, it may fold one Base plus one + // scaled register. If the scaled register is nil, do as if another + // element of the base regs is a 1-scaled register. + // This is possible if BaseRegs has at least 2 registers. 
+ + // If this is not an address calculation, this is not an addressing mode + // use. + if (LU.Kind != LSRUse::Address) + return false; + + // F is already scaled. + if (F.Scale != 0) + return false; + + // We need to keep one register for the base and one to scale. + if (F.BaseRegs.size() < 2) + return false; + + return isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, + F.BaseGV, F.BaseOffset, F.HasBaseReg, 1); + } + +static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F) { + if (!F.Scale) + return 0; + assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, + LU.AccessTy, F) && "Illegal formula in use."); + + switch (LU.Kind) { + case LSRUse::Address: { + // Check the scaling factor cost with both the min and max offsets. + int ScaleCostMinOffset = + TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV, + F.BaseOffset + LU.MinOffset, + F.HasBaseReg, F.Scale); + int ScaleCostMaxOffset = + TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV, + F.BaseOffset + LU.MaxOffset, + F.HasBaseReg, F.Scale); + + assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && + "Legal addressing mode has an illegal cost!"); + return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); } - return false; + case LSRUse::ICmpZero: + // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg. + // Therefore, return 0 in case F.Scale == -1. + return F.Scale != -1; + + case LSRUse::Basic: + case LSRUse::Special: + return 0; + } + + llvm_unreachable("Invalid LSRUse Kind!"); } -static bool isAlwaysFoldable(int64_t BaseOffs, - GlobalValue *BaseGV, - bool HasBaseReg, +static bool isAlwaysFoldable(const TargetTransformInfo &TTI, LSRUse::KindType Kind, Type *AccessTy, - const TargetLowering *TLI) { + GlobalValue *BaseGV, int64_t BaseOffset, + bool HasBaseReg) { // Fast-path: zero is always foldable. - if (BaseOffs == 0 && !BaseGV) return true; + if (BaseOffset == 0 && !BaseGV) return true; // Conservatively, create an address with an immediate and a // base and a scale. - TargetLowering::AddrMode AM; - AM.BaseOffs = BaseOffs; - AM.BaseGV = BaseGV; - AM.HasBaseReg = HasBaseReg; - AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1; + int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; // Canonicalize a scale of 1 to a base register if the formula doesn't // already have a base register. - if (!AM.HasBaseReg && AM.Scale == 1) { - AM.Scale = 0; - AM.HasBaseReg = true; + if (!HasBaseReg && Scale == 1) { + Scale = 0; + HasBaseReg = true; } - return isLegalUse(AM, Kind, AccessTy, TLI); + return isLegalUse(TTI, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); } -static bool isAlwaysFoldable(const SCEV *S, - int64_t MinOffset, int64_t MaxOffset, - bool HasBaseReg, - LSRUse::KindType Kind, Type *AccessTy, - const TargetLowering *TLI, - ScalarEvolution &SE) { +static bool isAlwaysFoldable(const TargetTransformInfo &TTI, + ScalarEvolution &SE, int64_t MinOffset, + int64_t MaxOffset, LSRUse::KindType Kind, + Type *AccessTy, const SCEV *S, bool HasBaseReg) { // Fast-path: zero is always foldable. if (S->isZero()) return true; // Conservatively, create an address with an immediate and a // base and a scale. - int64_t BaseOffs = ExtractImmediate(S, SE); + int64_t BaseOffset = ExtractImmediate(S, SE); GlobalValue *BaseGV = ExtractSymbol(S, SE); // If there's anything else involved, it's not foldable. if (!S->isZero()) return false; // Fast-path: zero is always foldable. 
- if (BaseOffs == 0 && !BaseGV) return true; + if (BaseOffset == 0 && !BaseGV) return true; // Conservatively, create an address with an immediate and a // base and a scale. - TargetLowering::AddrMode AM; - AM.BaseOffs = BaseOffs; - AM.BaseGV = BaseGV; - AM.HasBaseReg = HasBaseReg; - AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1; + int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; - return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI); + return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, + BaseOffset, HasBaseReg, Scale); } namespace { -/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding -/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind. -struct UseMapDenseMapInfo { - static std::pair getEmptyKey() { - return std::make_pair(reinterpret_cast(-1), LSRUse::Basic); - } +/// IVInc - An individual increment in a Chain of IV increments. +/// Relate an IV user to an expression that computes the IV it uses from the IV +/// used by the previous link in the Chain. +/// +/// For the head of a chain, IncExpr holds the absolute SCEV expression for the +/// original IVOperand. The head of the chain's IVOperand is only valid during +/// chain collection, before LSR replaces IV users. During chain generation, +/// IncExpr can be used to find the new IVOperand that computes the same +/// expression. +struct IVInc { + Instruction *UserInst; + Value* IVOperand; + const SCEV *IncExpr; - static std::pair getTombstoneKey() { - return std::make_pair(reinterpret_cast(-2), LSRUse::Basic); - } + IVInc(Instruction *U, Value *O, const SCEV *E): + UserInst(U), IVOperand(O), IncExpr(E) {} +}; - static unsigned - getHashValue(const std::pair &V) { - unsigned Result = DenseMapInfo::getHashValue(V.first); - Result ^= DenseMapInfo::getHashValue(unsigned(V.second)); - return Result; - } +// IVChain - The list of IV increments in program order. +// We typically add the head of a chain without finding subsequent links. +struct IVChain { + SmallVector Incs; + const SCEV *ExprBase; - static bool isEqual(const std::pair &LHS, - const std::pair &RHS) { - return LHS == RHS; + IVChain() : ExprBase(nullptr) {} + + IVChain(const IVInc &Head, const SCEV *Base) + : Incs(1, Head), ExprBase(Base) {} + + typedef SmallVectorImpl::const_iterator const_iterator; + + // begin - return the first increment in the chain. + const_iterator begin() const { + assert(!Incs.empty()); + return std::next(Incs.begin()); + } + const_iterator end() const { + return Incs.end(); } + + // hasIncs - Returns true if this chain contains any increments. + bool hasIncs() const { return Incs.size() >= 2; } + + // add - Add an IVInc to the end of this chain. + void add(const IVInc &X) { Incs.push_back(X); } + + // tailUserInst - Returns the last UserInst in the chain. + Instruction *tailUserInst() const { return Incs.back().UserInst; } + + // isProfitableIncrement - Returns true if IncExpr can be profitably added to + // this chain. + bool isProfitableIncrement(const SCEV *OperExpr, + const SCEV *IncExpr, + ScalarEvolution&); +}; + +/// ChainUsers - Helper for CollectChains to track multiple IV increment uses. +/// Distinguish between FarUsers that definitely cross IV increments and +/// NearUsers that may be used between IV increments. 
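In the IVChain type defined above, the head of the chain lives at Incs[0] and only the later entries are increments relative to the previous link, which is why begin() skips the first element and hasIncs() requires at least two entries. Before the ChainUsers helper declared next, here is a standalone sketch of that convention (ToyInc/ToyChain are illustrative stand-ins, not the pass's types):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

// Stand-in for IVInc: a user plus the increment that produces its IV value
// from the previous link in the chain.
struct ToyInc {
  const char *UserName;
  int64_t IncAmount;
};

// Stand-in for IVChain: Incs[0] is the head; only later entries are real
// increments relative to the previous link.
struct ToyChain {
  std::vector<ToyInc> Incs;

  explicit ToyChain(ToyInc Head) : Incs(1, Head) {}

  // Like IVChain::begin(): skip the head and iterate only the increments.
  std::vector<ToyInc>::const_iterator begin() const {
    assert(!Incs.empty());
    return std::next(Incs.begin());
  }
  std::vector<ToyInc>::const_iterator end() const { return Incs.end(); }

  bool hasIncs() const { return Incs.size() >= 2; }
  void add(ToyInc X) { Incs.push_back(X); }
  const char *tailUser() const { return Incs.back().UserName; }
};

int main() {
  ToyChain Chain({"load a[i]", 0});
  std::cout << Chain.hasIncs() << "\n"; // 0: a head alone is not a chain
  Chain.add({"load a[i+4]", 4});
  Chain.add({"load a[i+8]", 4});
  for (const ToyInc &I : Chain) // visits only the two increments
    std::cout << I.UserName << " +" << I.IncAmount << "\n";
  std::cout << Chain.tailUser() << "\n"; // load a[i+8]
  return 0;
}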
+struct ChainUsers { + SmallPtrSet FarUsers; + SmallPtrSet NearUsers; }; /// LSRInstance - This class holds state for the main loop strength reduction @@ -1328,7 +1564,7 @@ class LSRInstance { ScalarEvolution &SE; DominatorTree &DT; LoopInfo &LI; - const TargetLowering *const TLI; + const TargetTransformInfo &TTI; Loop *const L; bool Changed; @@ -1353,11 +1589,29 @@ class LSRInstance { /// RegUses - Track which uses use which register candidates. RegUseTracker RegUses; + // Limit the number of chains to avoid quadratic behavior. We don't expect to + // have more than a few IV increment chains in a loop. Missing a Chain falls + // back to normal LSR behavior for those uses. + static const unsigned MaxChains = 8; + + /// IVChainVec - IV users can form a chain of IV increments. + SmallVector IVChainVec; + + /// IVIncSet - IV users that belong to profitable IVChains. + SmallPtrSet IVIncSet; + void OptimizeShadowIV(); bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); void OptimizeLoopTermCond(); + void ChainInstruction(Instruction *UserInst, Instruction *IVOper, + SmallVectorImpl &ChainUsersVec); + void FinalizeChain(IVChain &Chain); + void CollectChains(); + void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, + SmallVectorImpl &DeadInsts); + void CollectInterestingTypesAndFactors(); void CollectFixupsAndInitialFormulae(); @@ -1367,9 +1621,7 @@ class LSRInstance { } // Support for sharing of LSRUses between LSRFixups. - typedef DenseMap, - size_t, - UseMapDenseMapInfo> UseMapTy; + typedef DenseMap UseMapTy; UseMapTy UseMap; bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, @@ -1383,7 +1635,6 @@ class LSRInstance { LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); -public: void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void CountRegisters(const Formula &F, size_t LUIdx); @@ -1422,9 +1673,11 @@ public: BasicBlock::iterator HoistInsertPosition(BasicBlock::iterator IP, const SmallVectorImpl &Inputs) const; - BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP, - const LSRFixup &LF, - const LSRUse &LU) const; + BasicBlock::iterator + AdjustInsertPositionForExpand(BasicBlock::iterator IP, + const LSRFixup &LF, + const LSRUse &LU, + SCEVExpander &Rewriter) const; Value *Expand(const LSRFixup &LF, const Formula &F, @@ -1444,7 +1697,8 @@ public: void ImplementSolution(const SmallVectorImpl &Solution, Pass *P); - LSRInstance(const TargetLowering *tli, Loop *l, Pass *P); +public: + LSRInstance(Loop *L, Pass *P); bool getChanged() const { return Changed; } @@ -1469,7 +1723,7 @@ void LSRInstance::OptimizeShadowIV() { IVUsers::const_iterator CandidateUI = UI; ++UI; Instruction *ShadowUse = CandidateUI->getUser(); - Type *DestTy = NULL; + Type *DestTy = nullptr; bool IsSigned = false; /* If shadow use is a int->float cast then insert a second IV @@ -1494,12 +1748,9 @@ void LSRInstance::OptimizeShadowIV() { } if (!DestTy) continue; - if (TLI) { - // If target does not support DestTy natively then do not apply - // this transformation. - EVT DVT = TLI->getValueType(DestTy); - if (!TLI->isTypeLegal(DVT)) continue; - } + // If target does not support DestTy natively then do not apply + // this transformation. 
+ if (!TTI.isTypeLegal(DestTy)) continue; PHINode *PH = dyn_cast(ShadowUse->getOperand(0)); if (!PH) continue; @@ -1534,7 +1785,7 @@ void LSRInstance::OptimizeShadowIV() { continue; /* Initialize new IV, double d = 0.0 in above example. */ - ConstantInt *C = NULL; + ConstantInt *C = nullptr; if (Incr->getOperand(0) == PH) C = dyn_cast(Incr->getOperand(1)); else if (Incr->getOperand(1) == PH) @@ -1656,7 +1907,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { // for ICMP_ULE here because the comparison would be with zero, which // isn't interesting. CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; - const SCEVNAryExpr *Max = 0; + const SCEVNAryExpr *Max = nullptr; if (const SCEVSMaxExpr *S = dyn_cast(BackedgeTakenCount)) { Pred = ICmpInst::ICMP_SLE; Max = S; @@ -1699,19 +1950,17 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { // Check the right operand of the select, and remember it, as it will // be used in the new comparison instruction. - Value *NewRHS = 0; + Value *NewRHS = nullptr; if (ICmpInst::isTrueWhenEqual(Pred)) { // Look for n+1, and grab n. if (AddOperator *BO = dyn_cast(Sel->getOperand(1))) - if (isa(BO->getOperand(1)) && - cast(BO->getOperand(1))->isOne() && - SE.getSCEV(BO->getOperand(0)) == MaxRHS) - NewRHS = BO->getOperand(0); + if (ConstantInt *BO1 = dyn_cast(BO->getOperand(1))) + if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) + NewRHS = BO->getOperand(0); if (AddOperator *BO = dyn_cast(Sel->getOperand(2))) - if (isa(BO->getOperand(1)) && - cast(BO->getOperand(1))->isOne() && - SE.getSCEV(BO->getOperand(0)) == MaxRHS) - NewRHS = BO->getOperand(0); + if (ConstantInt *BO1 = dyn_cast(BO->getOperand(1))) + if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) + NewRHS = BO->getOperand(0); if (!NewRHS) return Cond; } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) @@ -1771,7 +2020,7 @@ LSRInstance::OptimizeLoopTermCond() { continue; // Search IVUsesByStride to find Cond's IVUse if there is one. - IVStrideUse *CondUse = 0; + IVStrideUse *CondUse = nullptr; ICmpInst *Cond = cast(TermBr->getCondition()); if (!FindIVUserForCond(Cond, CondUse)) continue; @@ -1821,18 +2070,17 @@ LSRInstance::OptimizeLoopTermCond() { if (C->getValue().getMinSignedBits() >= 64 || C->getValue().isMinSignedValue()) goto decline_post_inc; - // Without TLI, assume that any stride might be valid, and so any - // use might be shared. - if (!TLI) - goto decline_post_inc; // Check for possible scaled-address reuse. Type *AccessTy = getAccessType(UI->getUser()); - TargetLowering::AddrMode AM; - AM.Scale = C->getSExtValue(); - if (TLI->isLegalAddressingMode(AM, AccessTy)) + int64_t Scale = C->getSExtValue(); + if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr, + /*BaseOffset=*/ 0, + /*HasBaseReg=*/ false, Scale)) goto decline_post_inc; - AM.Scale = -AM.Scale; - if (TLI->isLegalAddressingMode(AM, AccessTy)) + Scale = -Scale; + if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr, + /*BaseOffset=*/ 0, + /*HasBaseReg=*/ false, Scale)) goto decline_post_inc; } } @@ -1902,13 +2150,13 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, return false; // Conservatively assume HasBaseReg is true for now. 
if (NewOffset < LU.MinOffset) { - if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg, - Kind, AccessTy, TLI)) + if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, + LU.MaxOffset - NewOffset, HasBaseReg)) return false; NewMinOffset = NewOffset; } else if (NewOffset > LU.MaxOffset) { - if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg, - Kind, AccessTy, TLI)) + if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, + NewOffset - LU.MinOffset, HasBaseReg)) return false; NewMaxOffset = NewOffset; } @@ -1937,13 +2185,14 @@ LSRInstance::getUse(const SCEV *&Expr, int64_t Offset = ExtractImmediate(Expr, SE); // Basic uses can't accept any offset, for example. - if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) { + if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, + Offset, /*HasBaseReg=*/ true)) { Expr = Copy; Offset = 0; } std::pair P = - UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0)); + UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0)); if (!P.second) { // A use already existed with this base. size_t LUIdx = P.first->second; @@ -2005,14 +2254,14 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, // as OrigF. if (F.BaseRegs == OrigF.BaseRegs && F.ScaledReg == OrigF.ScaledReg && - F.AM.BaseGV == OrigF.AM.BaseGV && - F.AM.Scale == OrigF.AM.Scale && + F.BaseGV == OrigF.BaseGV && + F.Scale == OrigF.Scale && F.UnfoldedOffset == OrigF.UnfoldedOffset) { - if (F.AM.BaseOffs == 0) + if (F.BaseOffset == 0) return &LU; // This is the formula where all the registers and symbols matched; // there aren't going to be any others. Since we declined it, we - // can skip the rest of the formulae and procede to the next LSRUse. + // can skip the rest of the formulae and proceed to the next LSRUse. break; } } @@ -2020,7 +2269,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, } // Nothing looked good. - return 0; + return nullptr; } void LSRInstance::CollectInterestingTypesAndFactors() { @@ -2039,7 +2288,8 @@ void LSRInstance::CollectInterestingTypesAndFactors() { do { const SCEV *S = Worklist.pop_back_val(); if (const SCEVAddRecExpr *AR = dyn_cast(S)) { - Strides.insert(AR->getStepRecurrence(SE)); + if (AR->getLoop() == L) + Strides.insert(AR->getStepRecurrence(SE)); Worklist.push_back(AR->getStart()); } else if (const SCEVAddExpr *Add = dyn_cast(S)) { Worklist.append(Add->op_begin(), Add->op_end()); @@ -2051,7 +2301,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() { for (SmallSetVector::const_iterator I = Strides.begin(), E = Strides.end(); I != E; ++I) for (SmallSetVector::const_iterator NewStrideIter = - llvm::next(I); NewStrideIter != E; ++NewStrideIter) { + std::next(I); NewStrideIter != E; ++NewStrideIter) { const SCEV *OldStride = *I; const SCEV *NewStride = *NewStrideIter; @@ -2085,16 +2335,560 @@ void LSRInstance::CollectInterestingTypesAndFactors() { DEBUG(print_factors_and_types(dbgs())); } +/// findIVOperand - Helper for CollectChains that finds an IV operand (computed +/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped +/// Instructions to IVStrideUses, we could partially skip this. 
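As a standalone illustration of the scan this helper performs (its definition follows), the same pattern over a toy operand list: walk the operands and return the first one classified as an induction value of the current loop, or the end iterator if none is found. The classifier flag here is an invented stand-in for the AddRec check.

#include <iostream>
#include <string>
#include <vector>

// Toy operand: a name plus a flag saying whether it is an add-recurrence (an
// induction value) of the loop being optimized.
struct ToyOperand {
  std::string Name;
  bool IsAddRecOfThisLoop;
};

using OpIter = std::vector<ToyOperand>::const_iterator;

// Same shape as findIVOperand below: return the first IV operand in [OI, OE),
// or OE when there is none.
static OpIter findToyIVOperand(OpIter OI, OpIter OE) {
  for (; OI != OE; ++OI)
    if (OI->IsAddRecOfThisLoop)
      break;
  return OI;
}

int main() {
  std::vector<ToyOperand> Ops = {
      {"gep.base", false}, {"i.iv", true}, {"c42", false}};
  OpIter It = findToyIVOperand(Ops.begin(), Ops.end());
  if (It != Ops.end())
    std::cout << "IV operand: " << It->Name << "\n"; // prints "i.iv"
  return 0;
}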
+static User::op_iterator +findIVOperand(User::op_iterator OI, User::op_iterator OE, + Loop *L, ScalarEvolution &SE) { + for(; OI != OE; ++OI) { + if (Instruction *Oper = dyn_cast(*OI)) { + if (!SE.isSCEVable(Oper->getType())) + continue; + + if (const SCEVAddRecExpr *AR = + dyn_cast(SE.getSCEV(Oper))) { + if (AR->getLoop() == L) + break; + } + } + } + return OI; +} + +/// getWideOperand - IVChain logic must consistenctly peek base TruncInst +/// operands, so wrap it in a convenient helper. +static Value *getWideOperand(Value *Oper) { + if (TruncInst *Trunc = dyn_cast(Oper)) + return Trunc->getOperand(0); + return Oper; +} + +/// isCompatibleIVType - Return true if we allow an IV chain to include both +/// types. +static bool isCompatibleIVType(Value *LVal, Value *RVal) { + Type *LType = LVal->getType(); + Type *RType = RVal->getType(); + return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy()); +} + +/// getExprBase - Return an approximation of this SCEV expression's "base", or +/// NULL for any constant. Returning the expression itself is +/// conservative. Returning a deeper subexpression is more precise and valid as +/// long as it isn't less complex than another subexpression. For expressions +/// involving multiple unscaled values, we need to return the pointer-type +/// SCEVUnknown. This avoids forming chains across objects, such as: +/// PrevOper==a[i], IVOper==b[i], IVInc==b-a. +/// +/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost +/// SCEVUnknown, we simply return the rightmost SCEV operand. +static const SCEV *getExprBase(const SCEV *S) { + switch (S->getSCEVType()) { + default: // uncluding scUnknown. + return S; + case scConstant: + return nullptr; + case scTruncate: + return getExprBase(cast(S)->getOperand()); + case scZeroExtend: + return getExprBase(cast(S)->getOperand()); + case scSignExtend: + return getExprBase(cast(S)->getOperand()); + case scAddExpr: { + // Skip over scaled operands (scMulExpr) to follow add operands as long as + // there's nothing more complex. + // FIXME: not sure if we want to recognize negation. + const SCEVAddExpr *Add = cast(S); + for (std::reverse_iterator I(Add->op_end()), + E(Add->op_begin()); I != E; ++I) { + const SCEV *SubExpr = *I; + if (SubExpr->getSCEVType() == scAddExpr) + return getExprBase(SubExpr); + + if (SubExpr->getSCEVType() != scMulExpr) + return SubExpr; + } + return S; // all operands are scaled, be conservative. + } + case scAddRecExpr: + return getExprBase(cast(S)->getStart()); + } +} + +/// Return true if the chain increment is profitable to expand into a loop +/// invariant value, which may require its own register. A profitable chain +/// increment will be an offset relative to the same base. We allow such offsets +/// to potentially be used as chain increment as long as it's not obviously +/// expensive to expand using real instructions. +bool IVChain::isProfitableIncrement(const SCEV *OperExpr, + const SCEV *IncExpr, + ScalarEvolution &SE) { + // Aggressively form chains when -stress-ivchain. + if (StressIVChain) + return true; + + // Do not replace a constant offset from IV head with a nonconstant IV + // increment. 
+ if (!isa(IncExpr)) { + const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand)); + if (isa(SE.getMinusSCEV(OperExpr, HeadExpr))) + return 0; + } + + SmallPtrSet Processed; + return !isHighCostExpansion(IncExpr, Processed, SE); +} + +/// Return true if the number of registers needed for the chain is estimated to +/// be less than the number required for the individual IV users. First prohibit +/// any IV users that keep the IV live across increments (the Users set should +/// be empty). Next count the number and type of increments in the chain. +/// +/// Chaining IVs can lead to considerable code bloat if ISEL doesn't +/// effectively use postinc addressing modes. Only consider it profitable it the +/// increments can be computed in fewer registers when chained. +/// +/// TODO: Consider IVInc free if it's already used in another chains. +static bool +isProfitableChain(IVChain &Chain, SmallPtrSet &Users, + ScalarEvolution &SE, const TargetTransformInfo &TTI) { + if (StressIVChain) + return true; + + if (!Chain.hasIncs()) + return false; + + if (!Users.empty()) { + DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; + for (SmallPtrSet::const_iterator I = Users.begin(), + E = Users.end(); I != E; ++I) { + dbgs() << " " << **I << "\n"; + }); + return false; + } + assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); + + // The chain itself may require a register, so intialize cost to 1. + int cost = 1; + + // A complete chain likely eliminates the need for keeping the original IV in + // a register. LSR does not currently know how to form a complete chain unless + // the header phi already exists. + if (isa(Chain.tailUserInst()) + && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { + --cost; + } + const SCEV *LastIncExpr = nullptr; + unsigned NumConstIncrements = 0; + unsigned NumVarIncrements = 0; + unsigned NumReusedIncrements = 0; + for (IVChain::const_iterator I = Chain.begin(), E = Chain.end(); + I != E; ++I) { + + if (I->IncExpr->isZero()) + continue; + + // Incrementing by zero or some constant is neutral. We assume constants can + // be folded into an addressing mode or an add's immediate operand. + if (isa(I->IncExpr)) { + ++NumConstIncrements; + continue; + } + + if (I->IncExpr == LastIncExpr) + ++NumReusedIncrements; + else + ++NumVarIncrements; + + LastIncExpr = I->IncExpr; + } + // An IV chain with a single increment is handled by LSR's postinc + // uses. However, a chain with multiple increments requires keeping the IV's + // value live longer than it needs to be if chained. + if (NumConstIncrements > 1) + --cost; + + // Materializing increment expressions in the preheader that didn't exist in + // the original code may cost a register. For example, sign-extended array + // indices can produce ridiculous increments like this: + // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) + cost += NumVarIncrements; + + // Reusing variable increments likely saves a register to hold the multiple of + // the stride. + cost -= NumReusedIncrements; + + DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost + << "\n"); + + return cost < 0; +} + +/// ChainInstruction - Add this IV user to an existing chain or make it the head +/// of a new chain. 
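Before the implementation that follows, a standalone sketch of the core decision ChainInstruction makes: a new IV user joins an existing chain only when it shares the chain's base (cf. the ExprBase pruning) and differs from the last link by a computable increment; otherwise it heads a new chain, subject to a MaxChains-style limit. All names and values below are invented for illustration, and the real pass additionally requires the increment to be loop-invariant and cheap to expand.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Toy model of the decision made below: a user joins an existing chain only
// if it shares the chain's base, and then its increment is the offset
// difference from the last link; otherwise it starts a new chain, up to a
// MaxChains-style limit.
struct ToyUse { std::string Base; int64_t Offset; };
struct ToyChain { std::vector<ToyUse> Links; };

static void chainUse(const ToyUse &U, std::vector<ToyChain> &Chains,
                     unsigned MaxChains = 8) {
  for (ToyChain &C : Chains) {
    if (C.Links.back().Base != U.Base)
      continue; // prune on mismatched base
    int64_t Inc = U.Offset - C.Links.back().Offset;
    std::cout << "chain " << U.Base << ": increment " << Inc << "\n";
    C.Links.push_back(U);
    return;
  }
  if (Chains.size() >= MaxChains)
    return; // too many chains; fall back to normal LSR for this user
  std::cout << "new chain head: " << U.Base << "+" << U.Offset << "\n";
  Chains.push_back(ToyChain{{U}});
}

int main() {
  std::vector<ToyChain> Chains;
  for (ToyUse U : {ToyUse{"a", 0}, ToyUse{"b", 0}, ToyUse{"a", 4},
                   ToyUse{"a", 8}, ToyUse{"b", 16}})
    chainUse(U, Chains);
  return 0;
}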
+void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, + SmallVectorImpl &ChainUsersVec) { + // When IVs are used as types of varying widths, they are generally converted + // to a wider type with some uses remaining narrow under a (free) trunc. + Value *const NextIV = getWideOperand(IVOper); + const SCEV *const OperExpr = SE.getSCEV(NextIV); + const SCEV *const OperExprBase = getExprBase(OperExpr); + + // Visit all existing chains. Check if its IVOper can be computed as a + // profitable loop invariant increment from the last link in the Chain. + unsigned ChainIdx = 0, NChains = IVChainVec.size(); + const SCEV *LastIncExpr = nullptr; + for (; ChainIdx < NChains; ++ChainIdx) { + IVChain &Chain = IVChainVec[ChainIdx]; + + // Prune the solution space aggressively by checking that both IV operands + // are expressions that operate on the same unscaled SCEVUnknown. This + // "base" will be canceled by the subsequent getMinusSCEV call. Checking + // first avoids creating extra SCEV expressions. + if (!StressIVChain && Chain.ExprBase != OperExprBase) + continue; + + Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); + if (!isCompatibleIVType(PrevIV, NextIV)) + continue; + + // A phi node terminates a chain. + if (isa(UserInst) && isa(Chain.tailUserInst())) + continue; + + // The increment must be loop-invariant so it can be kept in a register. + const SCEV *PrevExpr = SE.getSCEV(PrevIV); + const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); + if (!SE.isLoopInvariant(IncExpr, L)) + continue; + + if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { + LastIncExpr = IncExpr; + break; + } + } + // If we haven't found a chain, create a new one, unless we hit the max. Don't + // bother for phi nodes, because they must be last in the chain. + if (ChainIdx == NChains) { + if (isa(UserInst)) + return; + if (NChains >= MaxChains && !StressIVChain) { + DEBUG(dbgs() << "IV Chain Limit\n"); + return; + } + LastIncExpr = OperExpr; + // IVUsers may have skipped over sign/zero extensions. We don't currently + // attempt to form chains involving extensions unless they can be hoisted + // into this loop's AddRec. + if (!isa(LastIncExpr)) + return; + ++NChains; + IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), + OperExprBase)); + ChainUsersVec.resize(NChains); + DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst + << ") IV=" << *LastIncExpr << "\n"); + } else { + DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst + << ") IV+" << *LastIncExpr << "\n"); + // Add this IV user to the end of the chain. + IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); + } + IVChain &Chain = IVChainVec[ChainIdx]; + + SmallPtrSet &NearUsers = ChainUsersVec[ChainIdx].NearUsers; + // This chain's NearUsers become FarUsers. + if (!LastIncExpr->isZero()) { + ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), + NearUsers.end()); + NearUsers.clear(); + } + + // All other uses of IVOperand become near uses of the chain. + // We currently ignore intermediate values within SCEV expressions, assuming + // they will eventually be used be the current chain, or can be computed + // from one of the chain increments. To be more precise we could + // transitively follow its user and only add leaf IV users to the set. + for (User *U : IVOper->users()) { + Instruction *OtherUse = dyn_cast(U); + if (!OtherUse) + continue; + // Uses in the chain will no longer be uses if the chain is formed. 
+ // Include the head of the chain in this iteration (not Chain.begin()). + IVChain::const_iterator IncIter = Chain.Incs.begin(); + IVChain::const_iterator IncEnd = Chain.Incs.end(); + for( ; IncIter != IncEnd; ++IncIter) { + if (IncIter->UserInst == OtherUse) + break; + } + if (IncIter != IncEnd) + continue; + + if (SE.isSCEVable(OtherUse->getType()) + && !isa(SE.getSCEV(OtherUse)) + && IU.isIVUserOrOperand(OtherUse)) { + continue; + } + NearUsers.insert(OtherUse); + } + + // Since this user is part of the chain, it's no longer considered a use + // of the chain. + ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); +} + +/// CollectChains - Populate the vector of Chains. +/// +/// This decreases ILP at the architecture level. Targets with ample registers, +/// multiple memory ports, and no register renaming probably don't want +/// this. However, such targets should probably disable LSR altogether. +/// +/// The job of LSR is to make a reasonable choice of induction variables across +/// the loop. Subsequent passes can easily "unchain" computation exposing more +/// ILP *within the loop* if the target wants it. +/// +/// Finding the best IV chain is potentially a scheduling problem. Since LSR +/// will not reorder memory operations, it will recognize this as a chain, but +/// will generate redundant IV increments. Ideally this would be corrected later +/// by a smart scheduler: +/// = A[i] +/// = A[i+x] +/// A[i] = +/// A[i+x] = +/// +/// TODO: Walk the entire domtree within this loop, not just the path to the +/// loop latch. This will discover chains on side paths, but requires +/// maintaining multiple copies of the Chains state. +void LSRInstance::CollectChains() { + DEBUG(dbgs() << "Collecting IV Chains.\n"); + SmallVector ChainUsersVec; + + SmallVector LatchPath; + BasicBlock *LoopHeader = L->getHeader(); + for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); + Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { + LatchPath.push_back(Rung->getBlock()); + } + LatchPath.push_back(LoopHeader); + + // Walk the instruction stream from the loop header to the loop latch. + for (SmallVectorImpl::reverse_iterator + BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend(); + BBIter != BBEnd; ++BBIter) { + for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); + I != E; ++I) { + // Skip instructions that weren't seen by IVUsers analysis. + if (isa(I) || !IU.isIVUserOrOperand(I)) + continue; + + // Ignore users that are part of a SCEV expression. This way we only + // consider leaf IV Users. This effectively rediscovers a portion of + // IVUsers analysis but in program order this time. + if (SE.isSCEVable(I->getType()) && !isa(SE.getSCEV(I))) + continue; + + // Remove this instruction from any NearUsers set it may be in. + for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); + ChainIdx < NChains; ++ChainIdx) { + ChainUsersVec[ChainIdx].NearUsers.erase(I); + } + // Search for operands that can be chained. + SmallPtrSet UniqueOperands; + User::op_iterator IVOpEnd = I->op_end(); + User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); + while (IVOpIter != IVOpEnd) { + Instruction *IVOpInst = cast(*IVOpIter); + if (UniqueOperands.insert(IVOpInst)) + ChainInstruction(I, IVOpInst, ChainUsersVec); + IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); + } + } // Continue walking down the instructions. + } // Continue walking down the domtree. + // Visit phi backedges to determine if the chain can generate the IV postinc. 
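Illustrative sketch, not part of the patch: a source-level shape of the access pattern cited in the CollectChains comment above (loads and stores of A[i] and A[i+x]). With a chain, the A[i+x] addresses are formed from the A[i] addresses plus one loop-invariant step instead of from a second induction expression.

// Illustrative sketch only, not part of the patch.
void exchange(float *A, int n, int x) {
  for (int i = 0; i < n - x; ++i) {
    float a = A[i];
    float b = A[i + x];     // chained: previous address plus the invariant step x
    A[i] = b;
    A[i + x] = a;
  }
}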
+ for (BasicBlock::iterator I = L->getHeader()->begin(); + PHINode *PN = dyn_cast(I); ++I) { + if (!SE.isSCEVable(PN->getType())) + continue; + + Instruction *IncV = + dyn_cast(PN->getIncomingValueForBlock(L->getLoopLatch())); + if (IncV) + ChainInstruction(PN, IncV, ChainUsersVec); + } + // Remove any unprofitable chains. + unsigned ChainIdx = 0; + for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); + UsersIdx < NChains; ++UsersIdx) { + if (!isProfitableChain(IVChainVec[UsersIdx], + ChainUsersVec[UsersIdx].FarUsers, SE, TTI)) + continue; + // Preserve the chain at UsesIdx. + if (ChainIdx != UsersIdx) + IVChainVec[ChainIdx] = IVChainVec[UsersIdx]; + FinalizeChain(IVChainVec[ChainIdx]); + ++ChainIdx; + } + IVChainVec.resize(ChainIdx); +} + +void LSRInstance::FinalizeChain(IVChain &Chain) { + assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); + DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); + + for (IVChain::const_iterator I = Chain.begin(), E = Chain.end(); + I != E; ++I) { + DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n"); + User::op_iterator UseI = + std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand); + assert(UseI != I->UserInst->op_end() && "cannot find IV operand"); + IVIncSet.insert(UseI); + } +} + +/// Return true if the IVInc can be folded into an addressing mode. +static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, + Value *Operand, const TargetTransformInfo &TTI) { + const SCEVConstant *IncConst = dyn_cast(IncExpr); + if (!IncConst || !isAddressUse(UserInst, Operand)) + return false; + + if (IncConst->getValue()->getValue().getMinSignedBits() > 64) + return false; + + int64_t IncOffset = IncConst->getValue()->getSExtValue(); + if (!isAlwaysFoldable(TTI, LSRUse::Address, + getAccessType(UserInst), /*BaseGV=*/ nullptr, + IncOffset, /*HaseBaseReg=*/ false)) + return false; + + return true; +} + +/// GenerateIVChains - Generate an add or subtract for each IVInc in a chain to +/// materialize the IV user's operand from the previous IV user's operand. +void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, + SmallVectorImpl &DeadInsts) { + // Find the new IVOperand for the head of the chain. It may have been replaced + // by LSR. + const IVInc &Head = Chain.Incs[0]; + User::op_iterator IVOpEnd = Head.UserInst->op_end(); + // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. + User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), + IVOpEnd, L, SE); + Value *IVSrc = nullptr; + while (IVOpIter != IVOpEnd) { + IVSrc = getWideOperand(*IVOpIter); + + // If this operand computes the expression that the chain needs, we may use + // it. (Check this after setting IVSrc which is used below.) + // + // Note that if Head.IncExpr is wider than IVSrc, then this phi is too + // narrow for the chain, so we can no longer use it. We do allow using a + // wider phi, assuming the LSR checked for free truncation. In that case we + // should already have a truncate on this operand such that + // getSCEV(IVSrc) == IncExpr. + if (SE.getSCEV(*IVOpIter) == Head.IncExpr + || SE.getSCEV(IVSrc) == Head.IncExpr) { + break; + } + IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); + } + if (IVOpIter == IVOpEnd) { + // Gracefully give up on this chain. 
+ DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); + return; + } + + DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); + Type *IVTy = IVSrc->getType(); + Type *IntTy = SE.getEffectiveSCEVType(IVTy); + const SCEV *LeftOverExpr = nullptr; + for (IVChain::const_iterator IncI = Chain.begin(), + IncE = Chain.end(); IncI != IncE; ++IncI) { + + Instruction *InsertPt = IncI->UserInst; + if (isa(InsertPt)) + InsertPt = L->getLoopLatch()->getTerminator(); + + // IVOper will replace the current IV User's operand. IVSrc is the IV + // value currently held in a register. + Value *IVOper = IVSrc; + if (!IncI->IncExpr->isZero()) { + // IncExpr was the result of subtraction of two narrow values, so must + // be signed. + const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy); + LeftOverExpr = LeftOverExpr ? + SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; + } + if (LeftOverExpr && !LeftOverExpr->isZero()) { + // Expand the IV increment. + Rewriter.clearPostInc(); + Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); + const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), + SE.getUnknown(IncV)); + IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); + + // If an IV increment can't be folded, use it as the next IV value. + if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand, + TTI)) { + assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); + IVSrc = IVOper; + LeftOverExpr = nullptr; + } + } + Type *OperTy = IncI->IVOperand->getType(); + if (IVTy != OperTy) { + assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && + "cannot extend a chained IV"); + IRBuilder<> Builder(InsertPt); + IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); + } + IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper); + DeadInsts.push_back(IncI->IVOperand); + } + // If LSR created a new, wider phi, we may also replace its postinc. We only + // do this if we also found a wide value for the head of the chain. + if (isa(Chain.tailUserInst())) { + for (BasicBlock::iterator I = L->getHeader()->begin(); + PHINode *Phi = dyn_cast(I); ++I) { + if (!isCompatibleIVType(Phi, IVSrc)) + continue; + Instruction *PostIncV = dyn_cast( + Phi->getIncomingValueForBlock(L->getLoopLatch())); + if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) + continue; + Value *IVOper = IVSrc; + Type *PostIncTy = PostIncV->getType(); + if (IVTy != PostIncTy) { + assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); + IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); + Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); + IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); + } + Phi->replaceUsesOfWith(PostIncV, IVOper); + DeadInsts.push_back(PostIncV); + } + } +} + void LSRInstance::CollectFixupsAndInitialFormulae() { for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { + Instruction *UserInst = UI->getUser(); + // Skip IV users that are part of profitable IV Chains. + User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), + UI->getOperandValToReplace()); + assert(UseI != UserInst->op_end() && "cannot find IV operand"); + if (IVIncSet.count(UseI)) + continue; + // Record the uses. 
LSRFixup &LF = getNewFixup(); - LF.UserInst = UI->getUser(); + LF.UserInst = UserInst; LF.OperandValToReplace = UI->getOperandValToReplace(); LF.PostIncLoops = UI->getPostIncLoops(); LSRUse::KindType Kind = LSRUse::Basic; - Type *AccessTy = 0; + Type *AccessTy = nullptr; if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { Kind = LSRUse::Address; AccessTy = getAccessType(LF.UserInst); @@ -2122,10 +2916,10 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { // x == y --> x - y == 0 const SCEV *N = SE.getSCEV(NV); - if (SE.isLoopInvariant(N, L)) { + if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) { // S is normalized, so normalize N before folding it into S // to keep the result normalized. - N = TransformForPostIncUse(Normalize, N, CI, 0, + N = TransformForPostIncUse(Normalize, N, CI, nullptr, LF.PostIncLoops, SE, DT); Kind = LSRUse::ICmpZero; S = SE.getMinusSCEV(N, S); @@ -2165,6 +2959,10 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { /// and loop-computable portions. void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { + // Mark uses whose expressions cannot be expanded. + if (!isSafeToExpand(S, SE)) + LU.RigidFormula = true; + Formula F; F.InitialMatch(S, L, SE); bool Inserted = InsertFormula(LU, LUIdx, F); @@ -2178,7 +2976,7 @@ LSRInstance::InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { Formula F; F.BaseRegs.push_back(S); - F.AM.HasBaseReg = true; + F.HasBaseReg = true; bool Inserted = InsertFormula(LU, LUIdx, F); assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; } @@ -2223,18 +3021,17 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { else if (const SCEVUDivExpr *D = dyn_cast(S)) { Worklist.push_back(D->getLHS()); Worklist.push_back(D->getRHS()); - } else if (const SCEVUnknown *U = dyn_cast(S)) { - if (!Inserted.insert(U)) continue; - const Value *V = U->getValue(); + } else if (const SCEVUnknown *US = dyn_cast(S)) { + if (!Inserted.insert(US)) continue; + const Value *V = US->getValue(); if (const Instruction *Inst = dyn_cast(V)) { // Look for instructions defined outside the loop. if (L->contains(Inst)) continue; } else if (isa(V)) // Undef doesn't have a live range, so it doesn't matter. continue; - for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); - UI != UE; ++UI) { - const Instruction *UserInst = dyn_cast(*UI); + for (const Use &U : V->uses()) { + const Instruction *UserInst = dyn_cast(U.getUser()); // Ignore non-instructions. if (!UserInst) continue; @@ -2246,7 +3043,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { const BasicBlock *UseBB = !isa(UserInst) ? UserInst->getParent() : cast(UserInst)->getIncomingBlock( - PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); + PHINode::getIncomingValueNumForOperand(U.getOperandNo())); if (!DT.dominates(L->getHeader(), UseBB)) continue; // Ignore uses which are part of other SCEV expressions, to avoid @@ -2256,7 +3053,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { // If the user is a no-op, look through to its uses. if (!isa(UserS)) continue; - if (UserS == U) { + if (UserS == US) { Worklist.push_back( SE.getUnknown(const_cast(UserInst))); continue; @@ -2264,7 +3061,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { } // Ignore icmp instructions which are already being analyzed. 
if (const ICmpInst *ICI = dyn_cast(UserInst)) { - unsigned OtherIdx = !UI.getOperandNo(); + unsigned OtherIdx = !U.getOperandNo(); Value *OtherOp = const_cast(ICI->getOperand(OtherIdx)); if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) continue; @@ -2272,8 +3069,8 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { LSRFixup &LF = getNewFixup(); LF.UserInst = const_cast(UserInst); - LF.OperandValToReplace = UI.getUse(); - std::pair P = getUse(S, LSRUse::Basic, 0); + LF.OperandValToReplace = U; + std::pair P = getUse(S, LSRUse::Basic, nullptr); LF.LUIdx = P.first; LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; @@ -2282,7 +3079,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { SE.getTypeSizeInBits(LU.WidestFixupType) < SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) LU.WidestFixupType = LF.OperandValToReplace->getType(); - InsertSupplementalFormula(U, LU, LF.LUIdx); + InsertSupplementalFormula(US, LU, LF.LUIdx); CountRegisters(LU.Formulae.back(), Uses.size() - 1); break; } @@ -2292,42 +3089,64 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { /// CollectSubexprs - Split S into subexpressions which can be pulled out into /// separate registers. If C is non-null, multiply each subexpression by C. -static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, - SmallVectorImpl &Ops, - const Loop *L, - ScalarEvolution &SE) { +/// +/// Return remainder expression after factoring the subexpressions captured by +/// Ops. If Ops is complete, return NULL. +static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, + SmallVectorImpl &Ops, + const Loop *L, + ScalarEvolution &SE, + unsigned Depth = 0) { + // Arbitrarily cap recursion to protect compile time. + if (Depth >= 3) + return S; + if (const SCEVAddExpr *Add = dyn_cast(S)) { // Break out add operands. for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); - I != E; ++I) - CollectSubexprs(*I, C, Ops, L, SE); - return; + I != E; ++I) { + const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1); + if (Remainder) + Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); + } + return nullptr; } else if (const SCEVAddRecExpr *AR = dyn_cast(S)) { // Split a non-zero base out of an addrec. - if (!AR->getStart()->isZero()) { - CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), - AR->getStepRecurrence(SE), - AR->getLoop(), - //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) - SCEV::FlagAnyWrap), - C, Ops, L, SE); - CollectSubexprs(AR->getStart(), C, Ops, L, SE); - return; + if (AR->getStart()->isZero()) + return S; + + const SCEV *Remainder = CollectSubexprs(AR->getStart(), + C, Ops, L, SE, Depth+1); + // Split the non-zero AddRec unless it is part of a nested recurrence that + // does not pertain to this loop. + if (Remainder && (AR->getLoop() == L || !isa(Remainder))) { + Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); + Remainder = nullptr; + } + if (Remainder != AR->getStart()) { + if (!Remainder) + Remainder = SE.getConstant(AR->getType(), 0); + return SE.getAddRecExpr(Remainder, + AR->getStepRecurrence(SE), + AR->getLoop(), + //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) + SCEV::FlagAnyWrap); } } else if (const SCEVMulExpr *Mul = dyn_cast(S)) { // Break (C * (a + b + c)) into C*a + C*b + C*c. - if (Mul->getNumOperands() == 2) - if (const SCEVConstant *Op0 = - dyn_cast(Mul->getOperand(0))) { - CollectSubexprs(Mul->getOperand(1), - C ? 
cast(SE.getMulExpr(C, Op0)) : Op0, - Ops, L, SE); - return; - } + if (Mul->getNumOperands() != 2) + return S; + if (const SCEVConstant *Op0 = + dyn_cast(Mul->getOperand(0))) { + C = C ? cast(SE.getMulExpr(C, Op0)) : Op0; + const SCEV *Remainder = + CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); + if (Remainder) + Ops.push_back(SE.getMulExpr(C, Remainder)); + return nullptr; + } } - - // Otherwise use the value itself, optionally with a scale applied. - Ops.push_back(C ? SE.getMulExpr(C, S) : S); + return S; } /// GenerateReassociations - Split out subexpressions from adds and the bases of @@ -2342,7 +3161,9 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, const SCEV *BaseReg = Base.BaseRegs[i]; SmallVector AddOps; - CollectSubexprs(BaseReg, 0, AddOps, L, SE); + const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE); + if (Remainder) + AddOps.push_back(Remainder); if (AddOps.size() == 1) continue; @@ -2356,23 +3177,21 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, // Don't pull a constant into a register if the constant could be folded // into an immediate field. - if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset, - Base.getNumRegs() > 1, - LU.Kind, LU.AccessTy, TLI, SE)) + if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, + LU.AccessTy, *J, Base.getNumRegs() > 1)) continue; // Collect all operands except *J. - SmallVector InnerAddOps - (((const SmallVector &)AddOps).begin(), J); - InnerAddOps.append - (llvm::next(J), ((const SmallVector &)AddOps).end()); + SmallVector InnerAddOps( + ((const SmallVector &)AddOps).begin(), J); + InnerAddOps.append(std::next(J), + ((const SmallVector &)AddOps).end()); // Don't leave just a constant behind in a register if the constant could // be folded into an immediate field. if (InnerAddOps.size() == 1 && - isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset, - Base.getNumRegs() > 1, - LU.Kind, LU.AccessTy, TLI, SE)) + isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, + LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) continue; const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); @@ -2382,10 +3201,10 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, // Add the remaining pieces of the add back into the new formula. const SCEVConstant *InnerSumSC = dyn_cast(InnerSum); - if (TLI && InnerSumSC && + if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && - TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + - InnerSumSC->getValue()->getZExtValue())) { + TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + + InnerSumSC->getValue()->getZExtValue())) { F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue(); F.BaseRegs.erase(F.BaseRegs.begin() + i); @@ -2394,9 +3213,9 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, // Add J as its own register, or an unfolded immediate. 
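Illustrative sketch, not part of the patch: the shape of the reworked CollectSubexprs recursion above on a toy expression type. Add nodes are flattened into Ops; anything else comes back as a remainder for the caller to scale and push. The Expr struct and all names here are hypothetical.

// Illustrative sketch only; toy expression type, not part of the patch.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Expr {
  std::string Leaf;                          // non-empty => leaf "register"
  std::vector<std::shared_ptr<Expr>> Adds;   // non-empty => add of operands
};

static std::shared_ptr<Expr> collectSubexprs(const std::shared_ptr<Expr> &S,
                                             std::vector<std::string> &Ops,
                                             unsigned Depth = 0) {
  if (Depth >= 3)
    return S;                                // cap recursion, as the pass does
  if (!S->Adds.empty()) {
    for (const auto &Op : S->Adds)
      if (auto Rem = collectSubexprs(Op, Ops, Depth + 1))
        Ops.push_back(Rem->Leaf);            // caller would apply any pending scale here
    return nullptr;                          // the add was fully consumed
  }
  return S;                                  // leaves are handed back as remainders
}

int main() {
  auto Leaf = [](const char *N) {
    auto E = std::make_shared<Expr>();
    E->Leaf = N;
    return E;
  };
  auto Add = std::make_shared<Expr>();
  Add->Adds = {Leaf("%a"), Leaf("%b"), Leaf("{0,+,4}<%loop>")};
  std::vector<std::string> Ops;
  collectSubexprs(Add, Ops);
  for (const auto &R : Ops)
    std::cout << "base reg: " << R << "\n";
}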
const SCEVConstant *SC = dyn_cast(*J); - if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && - TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + - SC->getValue()->getZExtValue())) + if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && + TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + + SC->getValue()->getZExtValue())) F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue(); else @@ -2445,7 +3264,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base) { // We can't add a symbolic offset if the address already contains one. - if (Base.AM.BaseGV) return; + if (Base.BaseGV) return; for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { const SCEV *G = Base.BaseRegs[i]; @@ -2453,9 +3272,8 @@ void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, if (G->isZero() || !GV) continue; Formula F = Base; - F.AM.BaseGV = GV; - if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) + F.BaseGV = GV; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) continue; F.BaseRegs[i] = G; (void)InsertFormula(LU, LUIdx, F); @@ -2478,9 +3296,9 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, for (SmallVectorImpl::const_iterator I = Worklist.begin(), E = Worklist.end(); I != E; ++I) { Formula F = Base; - F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I; - if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I, - LU.Kind, LU.AccessTy, TLI)) { + F.BaseOffset = (uint64_t)Base.BaseOffset - *I; + if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind, + LU.AccessTy, F)) { // Add the offset to the base register. const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G); // If it cancelled out, drop the base register, otherwise update it. @@ -2498,9 +3316,8 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, if (G->isZero() || Imm == 0) continue; Formula F = Base; - F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm; - if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) + F.BaseOffset = (uint64_t)F.BaseOffset + Imm; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) continue; F.BaseRegs[i] = G; (void)InsertFormula(LU, LUIdx, F); @@ -2521,7 +3338,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, // Don't do this if there is more than one offset. if (LU.MinOffset != LU.MaxOffset) return; - assert(!Base.AM.BaseGV && "ICmpZero use is not legal!"); + assert(!Base.BaseGV && "ICmpZero use is not legal!"); // Check each interesting stride. for (SmallSetVector::const_iterator @@ -2529,10 +3346,14 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, int64_t Factor = *I; // Check that the multiplication doesn't overflow. - if (Base.AM.BaseOffs == INT64_MIN && Factor == -1) + if (Base.BaseOffset == INT64_MIN && Factor == -1) + continue; + int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor; + if (NewBaseOffset / Factor != Base.BaseOffset) continue; - int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor; - if (NewBaseOffs / Factor != Base.AM.BaseOffs) + // If the offset will be truncated at this use, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, NewBaseOffset)) continue; // Check that multiplying with the use offset doesn't overflow. 
@@ -2542,16 +3363,20 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Offset = (uint64_t)Offset * Factor; if (Offset / Factor != LU.MinOffset) continue; + // If the offset will be truncated at this use, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, Offset)) + continue; Formula F = Base; - F.AM.BaseOffs = NewBaseOffs; + F.BaseOffset = NewBaseOffset; // Check that this scale is legal. - if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI)) + if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F)) continue; // Compensate for the use having MinOffset built into it. - F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset; + F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset; const SCEV *FactorS = SE.getConstant(IntTy, Factor); @@ -2576,6 +3401,10 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) continue; + // If the offset will be truncated, check that it is in bounds. + if (!IntTy->isPointerTy() && + !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset)) + continue; } // If we make it here and it's legal, add it. @@ -2592,23 +3421,23 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { if (!IntTy) return; // If this Formula already has a scaled register, we can't add another one. - if (Base.AM.Scale != 0) return; + if (Base.Scale != 0) return; // Check each interesting stride. for (SmallSetVector::const_iterator I = Factors.begin(), E = Factors.end(); I != E; ++I) { int64_t Factor = *I; - Base.AM.Scale = Factor; - Base.AM.HasBaseReg = Base.BaseRegs.size() > 1; + Base.Scale = Factor; + Base.HasBaseReg = Base.BaseRegs.size() > 1; // Check whether this scale is going to be legal. - if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) { + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, + Base)) { // As a special-case, handle special out-of-loop Basic users specially. // TODO: Reconsider this special case. if (LU.Kind == LSRUse::Basic && - isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, - LSRUse::Special, LU.AccessTy, TLI) && + isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special, + LU.AccessTy, Base) && LU.AllFixupsOutsideLoop) LU.Kind = LSRUse::Special; else @@ -2617,7 +3446,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { // For an ICmpZero, negating a solitary base register won't lead to // new solutions. if (LU.Kind == LSRUse::ICmpZero && - !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV) + !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV) continue; // For each addrec base reg, apply the scale, if possible. for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) @@ -2641,11 +3470,8 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { /// GenerateTruncates - Generate reuse formulae from different IV types. void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { - // This requires TargetLowering to tell us which truncates are free. - if (!TLI) return; - // Don't bother truncating symbolic values. - if (Base.AM.BaseGV) return; + if (Base.BaseGV) return; // Determine the integer type for the base formula. 
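Illustrative sketch, not part of the patch: the mixed-width situation GenerateTruncates above caters to. On a 64-bit target the address computation wants a 64-bit IV while the counter and compare are 32-bit, so narrow-use formulas can be derived from the wide IV whenever the target reports the truncate as free.

// Illustrative sketch only, not part of the patch.
long long sum(const int *p, unsigned n) {
  long long s = 0;
  for (unsigned i = 0; i < n; ++i)   // 32-bit counter; &p[i] needs 64-bit arithmetic
    s += p[i];
  return s;
}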
Type *DstTy = Base.getType(); @@ -2655,7 +3481,7 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { for (SmallSetVector::const_iterator I = Types.begin(), E = Types.end(); I != E; ++I) { Type *SrcTy = *I; - if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { + if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) { Formula F = Base; if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); @@ -2697,9 +3523,11 @@ void WorkItem::print(raw_ostream &OS) const { << " , add offset " << Imm; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void WorkItem::dump() const { print(errs()); errs() << '\n'; } +#endif /// GenerateCrossUseConstantOffsets - Look for registers which are a constant /// distance apart and try to form reuse opportunities between them. @@ -2759,8 +3587,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { // Conservatively examine offsets between this orig reg a few selected // other orig regs. ImmMapTy::const_iterator OtherImms[] = { - Imms.begin(), prior(Imms.end()), - Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2) + Imms.begin(), std::prev(Imms.end()), + Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) / + 2) }; for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { ImmMapTy::const_iterator M = OtherImms[i]; @@ -2800,16 +3629,15 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { const Formula &F = LU.Formulae[L]; // Use the immediate in the scaled register. if (F.ScaledReg == OrigReg) { - int64_t Offs = (uint64_t)F.AM.BaseOffs + - Imm * (uint64_t)F.AM.Scale; + int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; // Don't create 50 + reg(-50). if (F.referencesReg(SE.getSCEV( - ConstantInt::get(IntTy, -(uint64_t)Offs)))) + ConstantInt::get(IntTy, -(uint64_t)Offset)))) continue; Formula NewF = F; - NewF.AM.BaseOffs = Offs; - if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) + NewF.BaseOffset = Offset; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, + NewF)) continue; NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); @@ -2818,9 +3646,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { // immediate itself, then the formula isn't worthwhile. if (const SCEVConstant *C = dyn_cast(NewF.ScaledReg)) if (C->getValue()->isNegative() != - (NewF.AM.BaseOffs < 0) && - (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale)) - .ule(abs64(NewF.AM.BaseOffs))) + (NewF.BaseOffset < 0) && + (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale)) + .ule(abs64(NewF.BaseOffset))) continue; // OK, looks good. 
@@ -2832,11 +3660,10 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { if (BaseReg != OrigReg) continue; Formula NewF = F; - NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm; - if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) { - if (!TLI || - !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) + NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; + if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, + LU.Kind, LU.AccessTy, NewF)) { + if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) continue; NewF = F; NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; @@ -2850,11 +3677,11 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end(); J != JE; ++J) if (const SCEVConstant *C = dyn_cast(*J)) - if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt( - abs64(NewF.AM.BaseOffs)) && + if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt( + abs64(NewF.BaseOffset)) && (C->getValue()->getValue() + - NewF.AM.BaseOffs).countTrailingZeros() >= - CountTrailingZeros_64(NewF.AM.BaseOffs)) + NewF.BaseOffset).countTrailingZeros() >= + countTrailingZeros(NewF.BaseOffset)) goto skip_formula; // Ok, looks good. @@ -2908,13 +3735,14 @@ LSRInstance::GenerateAllReuseFormulae() { void LSRInstance::FilterOutUndesirableDedicatedRegisters() { DenseSet VisitedRegs; SmallPtrSet Regs; + SmallPtrSet LoserRegs; #ifndef NDEBUG bool ChangedFormulae = false; #endif // Collect the best formula for each unique set of shared registers. This // is reset for each use. - typedef DenseMap, size_t, UniquifierDenseMapInfo> + typedef DenseMap, size_t, UniquifierDenseMapInfo> BestFormulaeTy; BestFormulaeTy BestFormulae; @@ -2927,46 +3755,67 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { FIdx != NumForms; ++FIdx) { Formula &F = LU.Formulae[FIdx]; - SmallVector Key; - for (SmallVectorImpl::const_iterator J = F.BaseRegs.begin(), - JE = F.BaseRegs.end(); J != JE; ++J) { - const SCEV *Reg = *J; - if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) - Key.push_back(Reg); + // Some formulas are instant losers. For example, they may depend on + // nonexistent AddRecs from other loops. These need to be filtered + // immediately, otherwise heuristics could choose them over others leading + // to an unsatisfactory solution. Passing LoserRegs into RateFormula here + // avoids the need to recompute this information across formulae using the + // same bad AddRec. Passing LoserRegs is also essential unless we remove + // the corresponding bad register from the Regs set. + Cost CostF; + Regs.clear(); + CostF.RateFormula(TTI, F, Regs, VisitedRegs, L, LU.Offsets, SE, DT, LU, + &LoserRegs); + if (CostF.isLoser()) { + // During initial formula generation, undesirable formulae are generated + // by uses within other loops that have some non-trivial address mode or + // use the postinc form of the IV. LSR needs to provide these formulae + // as the basis of rediscovering the desired formula that uses an AddRec + // corresponding to the existing phi. Once all formulae have been + // generated, these initial losers may be pruned. + DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); + dbgs() << "\n"); } - if (F.ScaledReg && - RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) - Key.push_back(F.ScaledReg); - // Unstable sort by host order ok, because this is only used for - // uniquifying. 
- std::sort(Key.begin(), Key.end()); - - std::pair P = - BestFormulae.insert(std::make_pair(Key, FIdx)); - if (!P.second) { + else { + SmallVector Key; + for (SmallVectorImpl::const_iterator J = F.BaseRegs.begin(), + JE = F.BaseRegs.end(); J != JE; ++J) { + const SCEV *Reg = *J; + if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) + Key.push_back(Reg); + } + if (F.ScaledReg && + RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) + Key.push_back(F.ScaledReg); + // Unstable sort by host order ok, because this is only used for + // uniquifying. + std::sort(Key.begin(), Key.end()); + + std::pair P = + BestFormulae.insert(std::make_pair(Key, FIdx)); + if (P.second) + continue; + Formula &Best = LU.Formulae[P.first->second]; - Cost CostF; - CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT); - Regs.clear(); Cost CostBest; - CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT); Regs.clear(); + CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, LU.Offsets, SE, + DT, LU); if (CostF < CostBest) std::swap(F, Best); DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); dbgs() << "\n" " in favor of formula "; Best.print(dbgs()); dbgs() << '\n'); + } #ifndef NDEBUG - ChangedFormulae = true; + ChangedFormulae = true; #endif - LU.DeleteFormula(F); - --FIdx; - --NumForms; - Any = true; - continue; - } + LU.DeleteFormula(F); + --FIdx; + --NumForms; + Any = true; } // Now that we've filtered out some formulae, recompute the Regs set. @@ -3031,7 +3880,7 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { if (const SCEVConstant *C = dyn_cast(*I)) { Formula NewF = F; - NewF.AM.BaseOffs += C->getValue()->getSExtValue(); + NewF.BaseOffset += C->getValue()->getSExtValue(); NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { @@ -3044,9 +3893,9 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { } } else if (const SCEVUnknown *U = dyn_cast(*I)) { if (GlobalValue *GV = dyn_cast(U->getValue())) - if (!F.AM.BaseGV) { + if (!F.BaseGV) { Formula NewF = F; - NewF.AM.BaseGV = GV; + NewF.BaseGV = GV; NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { @@ -3075,84 +3924,83 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { /// for expressions like A, A+1, A+2, etc., allocate a single register for /// them. void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { - if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { - DEBUG(dbgs() << "The search space is too complex.\n"); + if (EstimateSearchSpaceComplexity() < ComplexityLimit) + return; - DEBUG(dbgs() << "Narrowing the search space by assuming that uses " - "separated by a constant offset will use the same " - "registers.\n"); + DEBUG(dbgs() << "The search space is too complex.\n" + "Narrowing the search space by assuming that uses separated " + "by a constant offset will use the same registers.\n"); - // This is especially useful for unrolled loops. + // This is especially useful for unrolled loops. 
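Illustrative sketch, not part of the patch: the hand-unrolled pattern that NarrowSearchSpaceByCollapsingUnrolledCode above is aimed at. The four stores differ only by a small constant, so their LSR uses can share one register for &p[i], with the 0/4/8/12-byte distances folded into the individual address immediates.

// Illustrative sketch only, not part of the patch.
void fill4(float *p, int n, float v) {
  for (int i = 0; i + 3 < n; i += 4) {
    p[i] = v;
    p[i + 1] = v;
    p[i + 2] = v;
    p[i + 3] = v;
  }
}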
- for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { - LSRUse &LU = Uses[LUIdx]; - for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), - E = LU.Formulae.end(); I != E; ++I) { - const Formula &F = *I; - if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) { - if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) { - if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs, - /*HasBaseReg=*/false, - LU.Kind, LU.AccessTy)) { - DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); - dbgs() << '\n'); - - LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; - - // Update the relocs to reference the new use. - for (SmallVectorImpl::iterator I = Fixups.begin(), - E = Fixups.end(); I != E; ++I) { - LSRFixup &Fixup = *I; - if (Fixup.LUIdx == LUIdx) { - Fixup.LUIdx = LUThatHas - &Uses.front(); - Fixup.Offset += F.AM.BaseOffs; - // Add the new offset to LUThatHas' offset list. - if (LUThatHas->Offsets.back() != Fixup.Offset) { - LUThatHas->Offsets.push_back(Fixup.Offset); - if (Fixup.Offset > LUThatHas->MaxOffset) - LUThatHas->MaxOffset = Fixup.Offset; - if (Fixup.Offset < LUThatHas->MinOffset) - LUThatHas->MinOffset = Fixup.Offset; - } - DEBUG(dbgs() << "New fixup has offset " - << Fixup.Offset << '\n'); - } - if (Fixup.LUIdx == NumUses-1) - Fixup.LUIdx = LUIdx; - } + for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { + LSRUse &LU = Uses[LUIdx]; + for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), + E = LU.Formulae.end(); I != E; ++I) { + const Formula &F = *I; + if (F.BaseOffset == 0 || F.Scale != 0) + continue; - // Delete formulae from the new use which are no longer legal. - bool Any = false; - for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { - Formula &F = LUThatHas->Formulae[i]; - if (!isLegalUse(F.AM, - LUThatHas->MinOffset, LUThatHas->MaxOffset, - LUThatHas->Kind, LUThatHas->AccessTy, TLI)) { - DEBUG(dbgs() << " Deleting "; F.print(dbgs()); - dbgs() << '\n'); - LUThatHas->DeleteFormula(F); - --i; - --e; - Any = true; - } - } - if (Any) - LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); + LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU); + if (!LUThatHas) + continue; - // Delete the old use. - DeleteUse(LU, LUIdx); - --LUIdx; - --NumUses; - break; - } + if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false, + LU.Kind, LU.AccessTy)) + continue; + + DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n'); + + LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; + + // Update the relocs to reference the new use. + for (SmallVectorImpl::iterator I = Fixups.begin(), + E = Fixups.end(); I != E; ++I) { + LSRFixup &Fixup = *I; + if (Fixup.LUIdx == LUIdx) { + Fixup.LUIdx = LUThatHas - &Uses.front(); + Fixup.Offset += F.BaseOffset; + // Add the new offset to LUThatHas' offset list. + if (LUThatHas->Offsets.back() != Fixup.Offset) { + LUThatHas->Offsets.push_back(Fixup.Offset); + if (Fixup.Offset > LUThatHas->MaxOffset) + LUThatHas->MaxOffset = Fixup.Offset; + if (Fixup.Offset < LUThatHas->MinOffset) + LUThatHas->MinOffset = Fixup.Offset; } + DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n'); } + if (Fixup.LUIdx == NumUses-1) + Fixup.LUIdx = LUIdx; } - } - DEBUG(dbgs() << "After pre-selection:\n"; - print_uses(dbgs())); + // Delete formulae from the new use which are no longer legal. 
+ bool Any = false; + for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { + Formula &F = LUThatHas->Formulae[i]; + if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset, + LUThatHas->Kind, LUThatHas->AccessTy, F)) { + DEBUG(dbgs() << " Deleting "; F.print(dbgs()); + dbgs() << '\n'); + LUThatHas->DeleteFormula(F); + --i; + --e; + Any = true; + } + } + + if (Any) + LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); + + // Delete the old use. + DeleteUse(LU, LUIdx); + --LUIdx; + --NumUses; + break; + } } + + DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } /// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call @@ -3187,7 +4035,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { // Pick the register which is used by the most LSRUses, which is likely // to be a good reuse register candidate. - const SCEV *Best = 0; + const SCEV *Best = nullptr; unsigned BestNum = 0; for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); I != E; ++I) { @@ -3278,30 +4126,39 @@ void LSRInstance::SolveRecurse(SmallVectorImpl &Solution, if (LU.Regs.count(*I)) ReqRegs.insert(*I); - bool AnySatisfiedReqRegs = false; SmallPtrSet NewRegs; Cost NewCost; -retry: for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), E = LU.Formulae.end(); I != E; ++I) { const Formula &F = *I; - // Ignore formulae which do not use any of the required registers. + // Ignore formulae which may not be ideal in terms of register reuse of + // ReqRegs. The formula should use all required registers before + // introducing new ones. + int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size()); for (SmallSetVector::const_iterator J = ReqRegs.begin(), JE = ReqRegs.end(); J != JE; ++J) { const SCEV *Reg = *J; - if ((!F.ScaledReg || F.ScaledReg != Reg) && - std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == - F.BaseRegs.end()) - goto skip; + if ((F.ScaledReg && F.ScaledReg == Reg) || + std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) != + F.BaseRegs.end()) { + --NumReqRegsToFind; + if (NumReqRegsToFind == 0) + break; + } + } + if (NumReqRegsToFind != 0) { + // If none of the formulae satisfied the required registers, then we could + // clear ReqRegs and try again. Currently, we simply give up in this case. + continue; } - AnySatisfiedReqRegs = true; // Evaluate the cost of the current formula. If it's already worse than // the current best, prune the search at that point. NewCost = CurCost; NewRegs = CurRegs; - NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT); + NewCost.RateFormula(TTI, F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT, + LU); if (NewCost < SolutionCost) { Workspace.push_back(&F); if (Workspace.size() != Uses.size()) { @@ -3311,7 +4168,7 @@ retry: VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); } else { DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); - dbgs() << ". Regs:"; + dbgs() << ".\n Regs:"; for (SmallPtrSet::const_iterator I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) dbgs() << ' ' << **I; @@ -3322,18 +4179,6 @@ retry: } Workspace.pop_back(); } - skip:; - } - - if (!EnableRetry && !AnySatisfiedReqRegs) - return; - - // If none of the formulae had all of the required registers, relax the - // constraint so that we don't exclude all formulae. 
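Illustrative sketch, not part of the patch: the formula filter SolveRecurse now applies above, with plain strings standing in for SCEV pointers. A candidate formula is explored only if it covers every register the partially built solution already requires; names here are hypothetical.

// Illustrative sketch only, not part of the patch.
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

static bool coversRequiredRegs(const std::vector<std::string> &FormulaRegs,
                               const std::set<std::string> &ReqRegs) {
  size_t ToFind = std::min(FormulaRegs.size(), ReqRegs.size());
  for (const std::string &Reg : ReqRegs) {
    if (std::find(FormulaRegs.begin(), FormulaRegs.end(), Reg) !=
        FormulaRegs.end()) {
      if (--ToFind == 0)
        break;
    }
  }
  return ToFind == 0;
}

int main() {
  std::set<std::string> Req = {"{0,+,4}<%loop>"};
  std::cout << coversRequiredRegs({"{0,+,4}<%loop>", "%base"}, Req) << "\n"; // 1
  std::cout << coversRequiredRegs({"%base"}, Req) << "\n";                   // 0
}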
- if (!AnySatisfiedReqRegs) { - assert(!ReqRegs.empty() && "Solver failed even without required registers"); - ReqRegs.clear(); - goto retry; } } @@ -3342,7 +4187,7 @@ retry: void LSRInstance::Solve(SmallVectorImpl &Solution) const { SmallVector Workspace; Cost SolutionCost; - SolutionCost.Loose(); + SolutionCost.Lose(); Cost CurCost; SmallPtrSet CurRegs; DenseSet VisitedRegs; @@ -3400,7 +4245,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, } bool AllDominate = true; - Instruction *BetterPos = 0; + Instruction *BetterPos = nullptr; Instruction *Tentative = IDom->getTerminator(); for (SmallVectorImpl::const_iterator I = Inputs.begin(), E = Inputs.end(); I != E; ++I) { @@ -3412,8 +4257,8 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, // Attempt to find an insert position in the middle of the block, // instead of at the end, so that it can be used for other expansions. if (IDom == Inst->getParent() && - (!BetterPos || DT.dominates(BetterPos, Inst))) - BetterPos = llvm::next(BasicBlock::iterator(Inst)); + (!BetterPos || !DT.dominates(Inst, BetterPos))) + BetterPos = std::next(BasicBlock::iterator(Inst)); } if (!AllDominate) break; @@ -3429,9 +4274,10 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, /// AdjustInsertPositionForExpand - Determine an input position which will be /// dominated by the operands and which will dominate the result. BasicBlock::iterator -LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP, +LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP, const LSRFixup &LF, - const LSRUse &LU) const { + const LSRUse &LU, + SCEVExpander &Rewriter) const { // Collect some instructions which must be dominated by the // expanding replacement. These must be dominated by any operands that // will be required in the expansion. @@ -3466,9 +4312,13 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP, } } + assert(!isa(LowestIP) && !isa(LowestIP) + && !isa(LowestIP) && + "Insertion point must be a normal instruction"); + // Then, climb up the immediate dominator tree as far as we can go while // still being dominated by the input positions. - IP = HoistInsertPosition(IP, Inputs); + BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); // Don't insert instructions before PHI nodes. while (isa(IP)) ++IP; @@ -3479,6 +4329,11 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP, // Ignore debug intrinsics. while (isa(IP)) ++IP; + // Set IP below instructions recently inserted by SCEVExpander. This keeps the + // IP consistent across expansions and allows the previously inserted + // instructions to be reused by subsequent expansion. + while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP; + return IP; } @@ -3490,10 +4345,12 @@ Value *LSRInstance::Expand(const LSRFixup &LF, SCEVExpander &Rewriter, SmallVectorImpl &DeadInsts) const { const LSRUse &LU = Uses[LF.LUIdx]; + if (LU.RigidFormula) + return LF.OperandValToReplace; // Determine an input position which will be dominated by the operands and // which will dominate the result. - IP = AdjustInsertPositionForExpand(IP, LF, LU); + IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); // Inform the Rewriter if we have a post-increment use, so that it can // perform an advantageous expansion. 
@@ -3527,19 +4384,12 @@ Value *LSRInstance::Expand(const LSRFixup &LF, LF.UserInst, LF.OperandValToReplace, Loops, SE, DT); - Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP))); - } - - // Flush the operand list to suppress SCEVExpander hoisting. - if (!Ops.empty()) { - Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); - Ops.clear(); - Ops.push_back(SE.getUnknown(FullV)); + Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP))); } // Expand the ScaledReg portion. - Value *ICmpScaledV = 0; - if (F.AM.Scale != 0) { + Value *ICmpScaledV = nullptr; + if (F.Scale != 0) { const SCEV *ScaledS = F.ScaledReg; // If we're expanding for a post-inc user, make the post-inc adjustment. @@ -3552,42 +4402,53 @@ Value *LSRInstance::Expand(const LSRFixup &LF, // An interesting way of "folding" with an icmp is to use a negated // scale, which we'll implement by inserting it into the other operand // of the icmp. - assert(F.AM.Scale == -1 && + assert(F.Scale == -1 && "The only scale supported by ICmpZero uses is -1!"); - ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP); + ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP); } else { // Otherwise just expand the scaled register and an explicit scale, // which is expected to be matched as part of the address. - ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP)); + + // Flush the operand list to suppress SCEVExpander hoisting address modes. + if (!Ops.empty() && LU.Kind == LSRUse::Address) { + Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); + Ops.clear(); + Ops.push_back(SE.getUnknown(FullV)); + } + ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP)); ScaledS = SE.getMulExpr(ScaledS, - SE.getConstant(ScaledS->getType(), F.AM.Scale)); + SE.getConstant(ScaledS->getType(), F.Scale)); Ops.push_back(ScaledS); + } + } - // Flush the operand list to suppress SCEVExpander hoisting. + // Expand the GV portion. + if (F.BaseGV) { + // Flush the operand list to suppress SCEVExpander hoisting. + if (!Ops.empty()) { Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); Ops.clear(); Ops.push_back(SE.getUnknown(FullV)); } + Ops.push_back(SE.getUnknown(F.BaseGV)); } - // Expand the GV portion. - if (F.AM.BaseGV) { - Ops.push_back(SE.getUnknown(F.AM.BaseGV)); - - // Flush the operand list to suppress SCEVExpander hoisting. + // Flush the operand list to suppress SCEVExpander hoisting of both folded and + // unfolded offsets. LSR assumes they both live next to their uses. + if (!Ops.empty()) { Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); Ops.clear(); Ops.push_back(SE.getUnknown(FullV)); } // Expand the immediate portion. - int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset; + int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset; if (Offset != 0) { if (LU.Kind == LSRUse::ICmpZero) { // The other interesting way of "folding" with an ICmpZero is to use a // negated immediate. 
if (!ICmpScaledV) - ICmpScaledV = ConstantInt::get(IntTy, -Offset); + ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); else { Ops.push_back(SE.getUnknown(ICmpScaledV)); ICmpScaledV = ConstantInt::get(IntTy, Offset); @@ -3622,9 +4483,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF, if (LU.Kind == LSRUse::ICmpZero) { ICmpInst *CI = cast(LF.UserInst); DeadInsts.push_back(CI->getOperand(1)); - assert(!F.AM.BaseGV && "ICmp does not support folding a global value and " + assert(!F.BaseGV && "ICmp does not support folding a global value and " "a scale at the same time!"); - if (F.AM.Scale == -1) { + if (F.Scale == -1) { if (ICmpScaledV->getType() != OpTy) { Instruction *Cast = CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, @@ -3634,7 +4495,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF, } CI->setOperand(1, ICmpScaledV); } else { - assert(F.AM.Scale == 0 && + assert(F.Scale == 0 && "ICmp does not support folding a global value and " "a scale at the same time!"); Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), @@ -3675,30 +4536,36 @@ void LSRInstance::RewriteForPHI(PHINode *PN, Loop *PNLoop = LI.getLoopFor(Parent); if (!PNLoop || Parent != PNLoop->getHeader()) { // Split the critical edge. - BasicBlock *NewBB = 0; + BasicBlock *NewBB = nullptr; if (!Parent->isLandingPad()) { - NewBB = SplitCriticalEdge(BB, Parent, P); + NewBB = SplitCriticalEdge(BB, Parent, P, + /*MergeIdenticalEdges=*/true, + /*DontDeleteUselessPhis=*/true); } else { SmallVector NewBBs; SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs); NewBB = NewBBs[0]; } - - // If PN is outside of the loop and BB is in the loop, we want to - // move the block to be immediately before the PHI block, not - // immediately after BB. - if (L->contains(BB) && !L->contains(PN)) - NewBB->moveBefore(PN->getParent()); - - // Splitting the edge can reduce the number of PHI entries we have. - e = PN->getNumIncomingValues(); - BB = NewBB; - i = PN->getBasicBlockIndex(BB); + // If NewBB==NULL, then SplitCriticalEdge refused to split because all + // phi predecessors are identical. The simple thing to do is skip + // splitting in this case rather than complicate the API. + if (NewBB) { + // If PN is outside of the loop and BB is in the loop, we want to + // move the block to be immediately before the PHI block, not + // immediately after BB. + if (L->contains(BB) && !L->contains(PN)) + NewBB->moveBefore(PN->getParent()); + + // Splitting the edge can reduce the number of PHI entries we have. + e = PN->getNumIncomingValues(); + BB = NewBB; + i = PN->getBasicBlockIndex(BB); + } } } std::pair::iterator, bool> Pair = - Inserted.insert(std::make_pair(BB, static_cast(0))); + Inserted.insert(std::make_pair(BB, static_cast(nullptr))); if (!Pair.second) PN->setIncomingValue(i, Pair.first->second); else { @@ -3767,9 +4634,20 @@ LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, SmallVector DeadInsts; SCEVExpander Rewriter(SE, "lsr"); +#ifndef NDEBUG + Rewriter.setDebugType(DEBUG_TYPE); +#endif Rewriter.disableCanonicalMode(); + Rewriter.enableLSRMode(); Rewriter.setIVIncInsertPos(L, IVIncInsertPos); + // Mark phi nodes that terminate chains so the expander tries to reuse them. + for (SmallVectorImpl::const_iterator ChainI = IVChainVec.begin(), + ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { + if (PHINode *PN = dyn_cast(ChainI->tailUserInst())) + Rewriter.setChainedPhi(PN); + } + // Expand the new value definitions and update the users. 
for (SmallVectorImpl::const_iterator I = Fixups.begin(), E = Fixups.end(); I != E; ++I) { @@ -3780,6 +4658,11 @@ LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, Changed = true; } + for (SmallVectorImpl::const_iterator ChainI = IVChainVec.begin(), + ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { + GenerateIVChain(*ChainI, Rewriter, DeadInsts); + Changed = true; + } // Clean up after ourselves. This must be done before deleting any // instructions. Rewriter.clear(); @@ -3787,21 +4670,49 @@ LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, Changed |= DeleteTriviallyDeadInstructions(DeadInsts); } -LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) - : IU(P->getAnalysis()), - SE(P->getAnalysis()), - DT(P->getAnalysis()), - LI(P->getAnalysis()), - TLI(tli), L(l), Changed(false), IVIncInsertPos(0) { - +LSRInstance::LSRInstance(Loop *L, Pass *P) + : IU(P->getAnalysis()), SE(P->getAnalysis()), + DT(P->getAnalysis().getDomTree()), + LI(P->getAnalysis()), + TTI(P->getAnalysis()), L(L), Changed(false), + IVIncInsertPos(nullptr) { // If LoopSimplify form is not available, stay out of trouble. - if (!L->isLoopSimplifyForm()) return; + if (!L->isLoopSimplifyForm()) + return; // If there's no interesting work to be done, bail early. if (IU.empty()) return; + // If there's too much analysis to be done, bail early. We won't be able to + // model the problem anyway. + unsigned NumUsers = 0; + for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { + if (++NumUsers > MaxIVUsers) { + DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L + << "\n"); + return; + } + } + +#ifndef NDEBUG + // All dominating loops must have preheaders, or SCEVExpander may not be able + // to materialize an AddRecExpr whose Start is an outer AddRecExpr. + // + // IVUsers analysis should only create users that are dominated by simple loop + // headers. Since this loop should dominate all of its users, its user list + // should be empty if this loop itself is not within a simple loop nest. + for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); + Rung; Rung = Rung->getIDom()) { + BasicBlock *BB = Rung->getBlock(); + const Loop *DomLoop = LI.getLoopFor(BB); + if (DomLoop && DomLoop->getHeader() == BB) { + assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); + } + } +#endif // DEBUG + DEBUG(dbgs() << "\nLSR on loop "; - WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false); + L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); dbgs() << ":\n"); // First, perform some low-level loop optimizations. @@ -3812,16 +4723,18 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) if (IU.empty()) return; // Skip nested loops until we can model them better with formulae. - if (!EnableNested && !L->empty()) { + if (!L->empty()) { DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); return; } // Start collecting data and preparing for the solver. + CollectChains(); CollectInterestingTypesAndFactors(); CollectFixupsAndInitialFormulae(); CollectLoopInvariantFixupsAndFormulae(); + assert(!Uses.empty() && "IVUsers reported at least one use"); DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; print_uses(dbgs())); @@ -3845,14 +4758,14 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) #ifndef NDEBUG // Formulae should be legal. 
- for (SmallVectorImpl::const_iterator I = Uses.begin(), - E = Uses.end(); I != E; ++I) { - const LSRUse &LU = *I; - for (SmallVectorImpl::const_iterator J = LU.Formulae.begin(), - JE = LU.Formulae.end(); J != JE; ++J) - assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI) && - "Illegal formula generated!"); + for (SmallVectorImpl::const_iterator I = Uses.begin(), E = Uses.end(); + I != E; ++I) { + const LSRUse &LU = *I; + for (SmallVectorImpl::const_iterator J = LU.Formulae.begin(), + JE = LU.Formulae.end(); + J != JE; ++J) + assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, + *J) && "Illegal formula generated!"); }; #endif @@ -3915,24 +4828,22 @@ void LSRInstance::print(raw_ostream &OS) const { print_uses(OS); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRInstance::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { class LoopStrengthReduce : public LoopPass { - /// TLI - Keep a pointer of a TargetLowering to consult for determining - /// transformation profitability. - const TargetLowering *const TLI; - public: static char ID; // Pass ID, replacement for typeid - explicit LoopStrengthReduce(const TargetLowering *tli = 0); + LoopStrengthReduce(); private: - bool runOnLoop(Loop *L, LPPassManager &LPM); - void getAnalysisUsage(AnalysisUsage &AU) const; + bool runOnLoop(Loop *L, LPPassManager &LPM) override; + void getAnalysisUsage(AnalysisUsage &AU) const override; }; } @@ -3940,7 +4851,8 @@ private: char LoopStrengthReduce::ID = 0; INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", "Loop Strength Reduction", false, false) -INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_AG_DEPENDENCY(TargetTransformInfo) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(IVUsers) INITIALIZE_PASS_DEPENDENCY(LoopInfo) @@ -3949,14 +4861,13 @@ INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce", "Loop Strength Reduction", false, false) -Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) { - return new LoopStrengthReduce(TLI); +Pass *llvm::createLoopStrengthReducePass() { + return new LoopStrengthReduce(); } -LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli) - : LoopPass(ID), TLI(tli) { - initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); - } +LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) { + initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); +} void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { // We split critical edges, so we change the CFG. However, we do update @@ -3966,8 +4877,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired(); AU.addPreserved(); AU.addRequiredID(LoopSimplifyID); - AU.addRequired(); - AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); AU.addRequired(); AU.addPreserved(); // Requiring LoopSimplify a second time here prevents IVUsers from running @@ -3975,17 +4886,34 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequiredID(LoopSimplifyID); AU.addRequired(); AU.addPreserved(); + AU.addRequired(); } bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { + if (skipOptnoneFunction(L)) + return false; + bool Changed = false; // Run the main LSR transformation. 
-  Changed |= LSRInstance(TLI, L, this).getChanged();
+  Changed |= LSRInstance(L, this).getChanged();
 
-  // At this point, it is worth checking to see if any recurrence PHIs are also
-  // dead, so that we can remove them as well.
+  // Remove any extra phis created by processing inner loops.
   Changed |= DeleteDeadPHIs(L->getHeader());
-
+  if (EnablePhiElim && L->isLoopSimplifyForm()) {
+    SmallVector<WeakVH, 16> DeadInsts;
+    SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
+#ifndef NDEBUG
+    Rewriter.setDebugType(DEBUG_TYPE);
+#endif
+    unsigned numFolded = Rewriter.replaceCongruentIVs(
+        L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts,
+        &getAnalysis<TargetTransformInfo>());
+    if (numFolded) {
+      Changed = true;
+      DeleteTriviallyDeadInstructions(DeadInsts);
+      DeleteDeadPHIs(L->getHeader());
+    }
+  }
   return Changed;
 }
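Usage note, not part of the patch: the pass keeps its registered name, so it can still be exercised in isolation with opt, for example

  opt -loop-reduce -S input.ll -o output.ll

and the options introduced in this change (-enable-lsr-phielim, plus -stress-ivchain in asserts builds) toggle the new congruent-phi cleanup and the IV-chain stress mode. With no target machine supplied, the addressing-mode queries fall back to the default TargetTransformInfo implementation.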