X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FLoopStrengthReduce.cpp;h=5a1efbe2c2ec6f58e9d7e8fd4b4ff153e1980c31;hb=d9b0b025612992a0b724eeca8bdf10b1d7a5c355;hp=bd7cc459019169db38ee788b6de8a90b631e412a;hpb=57aaa0b264d9d23d9a14235334b15d4117e32a3b;p=oota-llvm.git diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index bd7cc459019..94c229a8e24 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -63,18 +63,43 @@ #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" +#include "llvm/Assembly/Writer.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/DenseSet.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetLowering.h" #include using namespace llvm; +/// MaxIVUsers is an arbitrary threshold that provides an early opportunitiy for +/// bail out. This threshold is far beyond the number of users that LSR can +/// conceivably solve, so it should not affect generated code, but catches the +/// worst cases before LSR burns too much compile time and stack space. +static const unsigned MaxIVUsers = 200; + +// Temporary flag to cleanup congruent phis after LSR phi expansion. +// It's currently disabled until we can determine whether it's truly useful or +// not. The flag should be removed after the v3.0 release. +// This is now needed for ivchains. +static cl::opt EnablePhiElim( + "enable-lsr-phielim", cl::Hidden, cl::init(true), + cl::desc("Enable LSR phi elimination")); + +#ifndef NDEBUG +// Stress test IV chain generation. +static cl::opt StressIVChain( + "stress-ivchain", cl::Hidden, cl::init(false), + cl::desc("Stress test LSR IV chains")); +#else +static bool StressIVChain = false; +#endif + namespace { /// RegSortData - This class holds data which is used to order reuse candidates. @@ -113,6 +138,7 @@ class RegUseTracker { public: void CountRegister(const SCEV *Reg, size_t LUIdx); void DropRegister(const SCEV *Reg, size_t LUIdx); + void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx); bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const; @@ -150,11 +176,28 @@ RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) { RSD.UsedByIndices.reset(LUIdx); } +void +RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) { + assert(LUIdx <= LastLUIdx); + + // Update RegUses. The data structure is not optimized for this purpose; + // we must iterate through it and update each of the bit vectors. + for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end(); + I != E; ++I) { + SmallBitVector &UsedByIndices = I->second.UsedByIndices; + if (LUIdx < UsedByIndices.size()) + UsedByIndices[LUIdx] = + LastLUIdx < UsedByIndices.size() ? 
UsedByIndices[LastLUIdx] : 0; + UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx)); + } +} + bool RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const { - if (!RegUsesMap.count(Reg)) return false; - const SmallBitVector &UsedByIndices = - RegUsesMap.find(Reg)->second.UsedByIndices; + RegUsesTy::const_iterator I = RegUsesMap.find(Reg); + if (I == RegUsesMap.end()) + return false; + const SmallBitVector &UsedByIndices = I->second.UsedByIndices; int i = UsedByIndices.find_first(); if (i == -1) return false; if ((size_t)i != LUIdx) return true; @@ -190,13 +233,19 @@ struct Formula { /// when AM.Scale is not zero. const SCEV *ScaledReg; - Formula() : ScaledReg(0) {} + /// UnfoldedOffset - An additional constant offset which added near the + /// use. This requires a temporary register, but the offset itself can + /// live in an add immediate field rather than a register. + int64_t UnfoldedOffset; - void InitialMatch(const SCEV *S, Loop *L, - ScalarEvolution &SE, DominatorTree &DT); + Formula() : ScaledReg(0), UnfoldedOffset(0) {} + + void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE); unsigned getNumRegs() const; - const Type *getType() const; + Type *getType() const; + + void DeleteBaseReg(const SCEV *&S); bool referencesReg(const SCEV *S) const; bool hasRegsUsedByUsesOtherThan(size_t LUIdx, @@ -212,9 +261,9 @@ struct Formula { static void DoInitialMatch(const SCEV *S, Loop *L, SmallVectorImpl &Good, SmallVectorImpl &Bad, - ScalarEvolution &SE, DominatorTree &DT) { + ScalarEvolution &SE) { // Collect expressions which properly dominate the loop header. - if (S->properlyDominates(L->getHeader(), &DT)) { + if (SE.properlyDominates(S, L->getHeader())) { Good.push_back(S); return; } @@ -223,18 +272,19 @@ static void DoInitialMatch(const SCEV *S, Loop *L, if (const SCEVAddExpr *Add = dyn_cast(S)) { for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); I != E; ++I) - DoInitialMatch(*I, L, Good, Bad, SE, DT); + DoInitialMatch(*I, L, Good, Bad, SE); return; } // Look at addrec operands. if (const SCEVAddRecExpr *AR = dyn_cast(S)) if (!AR->getStart()->isZero()) { - DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT); + DoInitialMatch(AR->getStart(), L, Good, Bad, SE); DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), AR->getStepRecurrence(SE), - AR->getLoop()), - L, Good, Bad, SE, DT); + // FIXME: AR->getNoWrapFlags() + AR->getLoop(), SCEV::FlagAnyWrap), + L, Good, Bad, SE); return; } @@ -246,7 +296,7 @@ static void DoInitialMatch(const SCEV *S, Loop *L, SmallVector MyGood; SmallVector MyBad; - DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT); + DoInitialMatch(NewMul, L, MyGood, MyBad, SE); const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue( SE.getEffectiveSCEVType(NewMul->getType()))); for (SmallVectorImpl::const_iterator I = MyGood.begin(), @@ -266,11 +316,10 @@ static void DoInitialMatch(const SCEV *S, Loop *L, /// InitialMatch - Incorporate loop-variant parts of S into this Formula, /// attempting to keep all loop-invariant and loop-computable values in a /// single base register. 
-void Formula::InitialMatch(const SCEV *S, Loop *L, - ScalarEvolution &SE, DominatorTree &DT) { +void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { SmallVector Good; SmallVector Bad; - DoInitialMatch(S, L, Good, Bad, SE, DT); + DoInitialMatch(S, L, Good, Bad, SE); if (!Good.empty()) { const SCEV *Sum = SE.getAddExpr(Good); if (!Sum->isZero()) @@ -294,13 +343,20 @@ unsigned Formula::getNumRegs() const { /// getType - Return the type of this formula, if it has one, or null /// otherwise. This type is meaningless except for the bit size. -const Type *Formula::getType() const { +Type *Formula::getType() const { return !BaseRegs.empty() ? BaseRegs.front()->getType() : ScaledReg ? ScaledReg->getType() : AM.BaseGV ? AM.BaseGV->getType() : 0; } +/// DeleteBaseReg - Delete the given base reg from the BaseRegs list. +void Formula::DeleteBaseReg(const SCEV *&S) { + if (&S != &BaseRegs.back()) + std::swap(S, BaseRegs.back()); + BaseRegs.pop_back(); +} + /// referencesReg - Test if this formula references the given register. bool Formula::referencesReg(const SCEV *S) const { return S == ScaledReg || @@ -352,6 +408,10 @@ void Formula::print(raw_ostream &OS) const { OS << ""; OS << ')'; } + if (UnfoldedOffset != 0) { + if (!First) OS << " + "; else First = false; + OS << "imm(" << UnfoldedOffset << ')'; + } } void Formula::dump() const { @@ -361,28 +421,26 @@ void Formula::dump() const { /// isAddRecSExtable - Return true if the given addrec can be sign-extended /// without changing its value. static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { - const Type *WideTy = - IntegerType::get(SE.getContext(), - SE.getTypeSizeInBits(AR->getType()) + 1); + Type *WideTy = + IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1); return isa(SE.getSignExtendExpr(AR, WideTy)); } /// isAddSExtable - Return true if the given add can be sign-extended /// without changing its value. static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) { - const Type *WideTy = - IntegerType::get(SE.getContext(), - SE.getTypeSizeInBits(A->getType()) + 1); + Type *WideTy = + IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1); return isa(SE.getSignExtendExpr(A, WideTy)); } -/// isMulSExtable - Return true if the given add can be sign-extended +/// isMulSExtable - Return true if the given mul can be sign-extended /// without changing its value. -static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) { - const Type *WideTy = +static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) { + Type *WideTy = IntegerType::get(SE.getContext(), - SE.getTypeSizeInBits(A->getType()) + 1); - return isa(SE.getSignExtendExpr(A, WideTy)); + SE.getTypeSizeInBits(M->getType()) * M->getNumOperands()); + return isa(SE.getSignExtendExpr(M, WideTy)); } /// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined @@ -398,33 +456,45 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, if (LHS == RHS) return SE.getConstant(LHS->getType(), 1); - // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some - // folding. - if (RHS->isAllOnesValue()) - return SE.getMulExpr(LHS, RHS); + // Handle a few RHS special cases. + const SCEVConstant *RC = dyn_cast(RHS); + if (RC) { + const APInt &RA = RC->getValue()->getValue(); + // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do + // some folding. + if (RA.isAllOnesValue()) + return SE.getMulExpr(LHS, RC); + // Handle x /s 1 as x. 
+ if (RA == 1) + return LHS; + } // Check for a division of a constant by a constant. if (const SCEVConstant *C = dyn_cast(LHS)) { - const SCEVConstant *RC = dyn_cast(RHS); if (!RC) return 0; - if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0) + const APInt &LA = C->getValue()->getValue(); + const APInt &RA = RC->getValue()->getValue(); + if (LA.srem(RA) != 0) return 0; - return SE.getConstant(C->getValue()->getValue() - .sdiv(RC->getValue()->getValue())); + return SE.getConstant(LA.sdiv(RA)); } // Distribute the sdiv over addrec operands, if the addrec doesn't overflow. if (const SCEVAddRecExpr *AR = dyn_cast(LHS)) { if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) { - const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, - IgnoreSignificantBits); - if (!Start) return 0; const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, IgnoreSignificantBits); if (!Step) return 0; - return SE.getAddRecExpr(Start, Step, AR->getLoop()); + const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, + IgnoreSignificantBits); + if (!Start) return 0; + // FlagNW is independent of the start value, step direction, and is + // preserved with smaller magnitude steps. + // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) + return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); } + return 0; } // Distribute the sdiv over add operands, if the add doesn't overflow. @@ -440,26 +510,29 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, } return SE.getAddExpr(Ops); } + return 0; } // Check for a multiply operand that we can pull RHS out of. - if (const SCEVMulExpr *Mul = dyn_cast(LHS)) + if (const SCEVMulExpr *Mul = dyn_cast(LHS)) { if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) { SmallVector Ops; bool Found = false; for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end(); I != E; ++I) { + const SCEV *S = *I; if (!Found) - if (const SCEV *Q = getExactSDiv(*I, RHS, SE, + if (const SCEV *Q = getExactSDiv(S, RHS, SE, IgnoreSignificantBits)) { - Ops.push_back(Q); + S = Q; Found = true; - continue; } - Ops.push_back(*I); + Ops.push_back(S); } return Found ? SE.getMulExpr(Ops) : 0; } + return 0; + } // Otherwise we don't know. 
return 0; @@ -477,12 +550,16 @@ static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { } else if (const SCEVAddExpr *Add = dyn_cast(S)) { SmallVector NewOps(Add->op_begin(), Add->op_end()); int64_t Result = ExtractImmediate(NewOps.front(), SE); - S = SE.getAddExpr(NewOps); + if (Result != 0) + S = SE.getAddExpr(NewOps); return Result; } else if (const SCEVAddRecExpr *AR = dyn_cast(S)) { SmallVector NewOps(AR->op_begin(), AR->op_end()); int64_t Result = ExtractImmediate(NewOps.front(), SE); - S = SE.getAddRecExpr(NewOps, AR->getLoop()); + if (Result != 0) + S = SE.getAddRecExpr(NewOps, AR->getLoop(), + // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) + SCEV::FlagAnyWrap); return Result; } return 0; @@ -500,12 +577,16 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { } else if (const SCEVAddExpr *Add = dyn_cast(S)) { SmallVector NewOps(Add->op_begin(), Add->op_end()); GlobalValue *Result = ExtractSymbol(NewOps.back(), SE); - S = SE.getAddExpr(NewOps); + if (Result) + S = SE.getAddExpr(NewOps); return Result; } else if (const SCEVAddRecExpr *AR = dyn_cast(S)) { SmallVector NewOps(AR->op_begin(), AR->op_end()); GlobalValue *Result = ExtractSymbol(NewOps.front(), SE); - S = SE.getAddRecExpr(NewOps, AR->getLoop()); + if (Result) + S = SE.getAddRecExpr(NewOps, AR->getLoop(), + // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) + SCEV::FlagAnyWrap); return Result; } return 0; @@ -524,14 +605,11 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) { switch (II->getIntrinsicID()) { default: break; case Intrinsic::prefetch: - case Intrinsic::x86_sse2_loadu_dq: - case Intrinsic::x86_sse2_loadu_pd: - case Intrinsic::x86_sse_loadu_ps: case Intrinsic::x86_sse_storeu_ps: case Intrinsic::x86_sse2_storeu_pd: case Intrinsic::x86_sse2_storeu_dq: case Intrinsic::x86_sse2_storel_dq: - if (II->getOperand(1) == OperandVal) + if (II->getArgOperand(0) == OperandVal) isAddress = true; break; } @@ -540,8 +618,8 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) { } /// getAccessType - Return the type of the memory being accessed. -static const Type *getAccessType(const Instruction *Inst) { - const Type *AccessTy = Inst->getType(); +static Type *getAccessType(const Instruction *Inst) { + Type *AccessTy = Inst->getType(); if (const StoreInst *SI = dyn_cast(Inst)) AccessTy = SI->getOperand(0)->getType(); else if (const IntrinsicInst *II = dyn_cast(Inst)) { @@ -553,20 +631,105 @@ static const Type *getAccessType(const Instruction *Inst) { case Intrinsic::x86_sse2_storeu_pd: case Intrinsic::x86_sse2_storeu_dq: case Intrinsic::x86_sse2_storel_dq: - AccessTy = II->getOperand(1)->getType(); + AccessTy = II->getArgOperand(0)->getType(); break; } } // All pointers have the same requirements, so canonicalize them to an // arbitrary pointer type to minimize variation. - if (const PointerType *PTy = dyn_cast(AccessTy)) + if (PointerType *PTy = dyn_cast(AccessTy)) AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), PTy->getAddressSpace()); return AccessTy; } +/// isExistingPhi - Return true if this AddRec is already a phi in its loop. 
+static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { + for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); + PHINode *PN = dyn_cast(I); ++I) { + if (SE.isSCEVable(PN->getType()) && + (SE.getEffectiveSCEVType(PN->getType()) == + SE.getEffectiveSCEVType(AR->getType())) && + SE.getSCEV(PN) == AR) + return true; + } + return false; +} + +/// Check if expanding this expression is likely to incur significant cost. This +/// is tricky because SCEV doesn't track which expressions are actually computed +/// by the current IR. +/// +/// We currently allow expansion of IV increments that involve adds, +/// multiplication by constants, and AddRecs from existing phis. +/// +/// TODO: Allow UDivExpr if we can find an existing IV increment that is an +/// obvious multiple of the UDivExpr. +static bool isHighCostExpansion(const SCEV *S, + SmallPtrSet &Processed, + ScalarEvolution &SE) { + // Zero/One operand expressions + switch (S->getSCEVType()) { + case scUnknown: + case scConstant: + return false; + case scTruncate: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + case scZeroExtend: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + case scSignExtend: + return isHighCostExpansion(cast(S)->getOperand(), + Processed, SE); + } + + if (!Processed.insert(S)) + return false; + + if (const SCEVAddExpr *Add = dyn_cast(S)) { + for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); + I != E; ++I) { + if (isHighCostExpansion(*I, Processed, SE)) + return true; + } + return false; + } + + if (const SCEVMulExpr *Mul = dyn_cast(S)) { + if (Mul->getNumOperands() == 2) { + // Multiplication by a constant is ok + if (isa(Mul->getOperand(0))) + return isHighCostExpansion(Mul->getOperand(1), Processed, SE); + + // If we have the value of one operand, check if an existing + // multiplication already generates this expression. + if (const SCEVUnknown *U = dyn_cast(Mul->getOperand(1))) { + Value *UVal = U->getValue(); + for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end(); + UI != UE; ++UI) { + // If U is a constant, it may be used by a ConstantExpr. + Instruction *User = dyn_cast(*UI); + if (User && User->getOpcode() == Instruction::Mul + && SE.isSCEVable(User->getType())) { + return SE.getSCEV(User) == Mul; + } + } + } + } + } + + if (const SCEVAddRecExpr *AR = dyn_cast(S)) { + if (isExistingPhi(AR, SE)) + return false; + } + + // Fow now, consider any other type of expression (div/mul/min/max) high cost. + return true; +} + /// DeleteTriviallyDeadInstructions - If any of the instructions is the /// specified set are trivially dead, delete them and see if this makes any of /// their operands subsequently dead. @@ -575,7 +738,7 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { bool Changed = false; while (!DeadInsts.empty()) { - Instruction *I = dyn_cast_or_null(DeadInsts.pop_back_val()); + Instruction *I = dyn_cast_or_null(&*DeadInsts.pop_back_val()); if (I == 0 || !isInstructionTriviallyDead(I)) continue; @@ -612,18 +775,32 @@ public: : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0), SetupCost(0) {} - unsigned getNumRegs() const { return NumRegs; } - bool operator<(const Cost &Other) const; void Loose(); +#ifndef NDEBUG + // Once any of the metrics loses, they must all remain losers. 
+ bool isValid() { + return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds + | ImmCost | SetupCost) != ~0u) + || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds + & ImmCost & SetupCost) == ~0u); + } +#endif + + bool isLoser() { + assert(isValid() && "invalid cost"); + return NumRegs == ~0u; + } + void RateFormula(const Formula &F, SmallPtrSet &Regs, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, - ScalarEvolution &SE, DominatorTree &DT); + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs = 0); void print(raw_ostream &OS) const; void dump() const; @@ -636,7 +813,8 @@ private: void RatePrimaryRegister(const SCEV *Reg, SmallPtrSet &Regs, const Loop *L, - ScalarEvolution &SE, DominatorTree &DT); + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs); }; } @@ -647,37 +825,30 @@ void Cost::RateRegister(const SCEV *Reg, const Loop *L, ScalarEvolution &SE, DominatorTree &DT) { if (const SCEVAddRecExpr *AR = dyn_cast(Reg)) { - if (AR->getLoop() == L) - AddRecCost += 1; /// TODO: This should be a function of the stride. - - // If this is an addrec for a loop that's already been visited by LSR, - // don't second-guess its addrec phi nodes. LSR isn't currently smart - // enough to reason about more than one loop at a time. Consider these - // registers free and leave them alone. - else if (L->contains(AR->getLoop()) || - (!AR->getLoop()->contains(L) && - DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) { - for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); - PHINode *PN = dyn_cast(I); ++I) - if (SE.isSCEVable(PN->getType()) && - (SE.getEffectiveSCEVType(PN->getType()) == - SE.getEffectiveSCEVType(AR->getType())) && - SE.getSCEV(PN) == AR) - return; + // If this is an addrec for another loop, don't second-guess its addrec phi + // nodes. LSR isn't currently smart enough to reason about more than one + // loop at a time. LSR has already run on inner loops, will not run on outer + // loops, and cannot be expected to change sibling loops. + if (AR->getLoop() != L) { + // If the AddRec exists, consider it's register free and leave it alone. + if (isExistingPhi(AR, SE)) + return; - // If this isn't one of the addrecs that the loop already has, it - // would require a costly new phi and add. TODO: This isn't - // precisely modeled right now. - ++NumBaseAdds; - if (!Regs.count(AR->getStart())) - RateRegister(AR->getStart(), Regs, L, SE, DT); + // Otherwise, do not consider this formula at all. + Loose(); + return; } + AddRecCost += 1; /// TODO: This should be a function of the stride. // Add the step value register, if it needs one. // TODO: The non-affine case isn't precisely modeled here. - if (!AR->isAffine() || !isa(AR->getOperand(1))) - if (!Regs.count(AR->getStart())) + if (!AR->isAffine() || !isa(AR->getOperand(1))) { + if (!Regs.count(AR->getOperand(1))) { RateRegister(AR->getOperand(1), Regs, L, SE, DT); + if (isLoser()) + return; + } + } } ++NumRegs; @@ -689,16 +860,28 @@ void Cost::RateRegister(const SCEV *Reg, (isa(cast(Reg)->getStart()) || isa(cast(Reg)->getStart())))) ++SetupCost; + + NumIVMuls += isa(Reg) && + SE.hasComputableLoopEvolution(Reg, L); } /// RatePrimaryRegister - Record this register in the set. If we haven't seen it -/// before, rate it. +/// before, rate it. Optional LoserRegs provides a way to declare any formula +/// that refers to one of those regs an instant loser. 
void Cost::RatePrimaryRegister(const SCEV *Reg, SmallPtrSet &Regs, const Loop *L, - ScalarEvolution &SE, DominatorTree &DT) { - if (Regs.insert(Reg)) + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs) { + if (LoserRegs && LoserRegs->count(Reg)) { + Loose(); + return; + } + if (Regs.insert(Reg)) { RateRegister(Reg, Regs, L, SE, DT); + if (isLoser()) + LoserRegs->insert(Reg); + } } void Cost::RateFormula(const Formula &F, @@ -706,14 +889,17 @@ void Cost::RateFormula(const Formula &F, const DenseSet &VisitedRegs, const Loop *L, const SmallVectorImpl &Offsets, - ScalarEvolution &SE, DominatorTree &DT) { + ScalarEvolution &SE, DominatorTree &DT, + SmallPtrSet *LoserRegs) { // Tally up the registers. if (const SCEV *ScaledReg = F.ScaledReg) { if (VisitedRegs.count(ScaledReg)) { Loose(); return; } - RatePrimaryRegister(ScaledReg, Regs, L, SE, DT); + RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs); + if (isLoser()) + return; } for (SmallVectorImpl::const_iterator I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { @@ -722,14 +908,15 @@ void Cost::RateFormula(const Formula &F, Loose(); return; } - RatePrimaryRegister(BaseReg, Regs, L, SE, DT); - - NumIVMuls += isa(BaseReg) && - BaseReg->hasComputableLoopEvolution(L); + RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs); + if (isLoser()) + return; } - if (F.BaseRegs.size() > 1) - NumBaseAdds += F.BaseRegs.size() - 1; + // Determine how many (unfolded) adds we'll need inside the loop. + size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0); + if (NumBaseParts > 1) + NumBaseAdds += NumBaseParts - 1; // Tally up the non-zero immediates. for (SmallVectorImpl::const_iterator I = Offsets.begin(), @@ -741,9 +928,10 @@ void Cost::RateFormula(const Formula &F, else if (Offset != 0) ImmCost += APInt(64, Offset, true).getMinSignedBits(); } + assert(isValid() && "invalid cost"); } -/// Loose - Set this cost to a loosing value. +/// Loose - Set this cost to a losing value. void Cost::Loose() { NumRegs = ~0u; AddRecCost = ~0u; @@ -827,8 +1015,7 @@ struct LSRFixup { } LSRFixup::LSRFixup() - : UserInst(0), OperandValToReplace(0), - LUIdx(~size_t(0)), Offset(0) {} + : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} /// isUseFullyOutsideLoop - Test whether this fixup always uses its /// value outside of the given loop. @@ -927,7 +1114,7 @@ public: }; KindType Kind; - const Type *AccessTy; + Type *AccessTy; SmallVector Offsets; int64_t MinOffset; @@ -938,6 +1125,12 @@ public: /// may be used. bool AllFixupsOutsideLoop; + /// WidestFixupType - This records the widest use type for any fixup using + /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different + /// max fixup widths to be equivalent, because the narrower one may be relying + /// on the implicit truncation to truncate away bogus bits. + Type *WidestFixupType; + /// Formulae - A list of ways to build a value that can satisfy this user. /// After the list is populated, one of these is selected heuristically and /// used to formulate a replacement for OperandValToReplace in UserInst. @@ -946,21 +1139,33 @@ public: /// Regs - The set of register candidates used by all formulae in this LSRUse. 
SmallPtrSet Regs; - LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T), + LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T), MinOffset(INT64_MAX), MaxOffset(INT64_MIN), - AllFixupsOutsideLoop(true) {} + AllFixupsOutsideLoop(true), + WidestFixupType(0) {} + bool HasFormulaWithSameRegs(const Formula &F) const; bool InsertFormula(const Formula &F); void DeleteFormula(Formula &F); void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses); - void check() const; - void print(raw_ostream &OS) const; void dump() const; }; +} + +/// HasFormula - Test whether this use as a formula which has the same +/// registers as the given formula. +bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { + SmallVector Key = F.BaseRegs; + if (F.ScaledReg) Key.push_back(F.ScaledReg); + // Unstable sort by host order ok, because this is only used for uniquifying. + std::sort(Key.begin(), Key.end()); + return Uniquifier.count(Key); +} + /// InsertFormula - If the given formula has not yet been inserted, add it to /// the list, and return true. Return false otherwise. bool LSRUse::InsertFormula(const Formula &F) { @@ -985,7 +1190,6 @@ bool LSRUse::InsertFormula(const Formula &F) { Formulae.push_back(F); // Record registers now being used by this use. - if (F.ScaledReg) Regs.insert(F.ScaledReg); Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); return true; @@ -993,7 +1197,8 @@ bool LSRUse::InsertFormula(const Formula &F) { /// DeleteFormula - Remove the given formula from this use's list. void LSRUse::DeleteFormula(Formula &F) { - std::swap(F, Formulae.back()); + if (&F != &Formulae.back()) + std::swap(F, Formulae.back()); Formulae.pop_back(); } @@ -1002,8 +1207,9 @@ void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) { // Now that we've filtered out some formulae, recompute the Regs set. SmallPtrSet OldRegs = Regs; Regs.clear(); - for (size_t FIdx = 0, NumForms = Formulae.size(); FIdx != NumForms; ++FIdx) { - Formula &F = Formulae[FIdx]; + for (SmallVectorImpl::const_iterator I = Formulae.begin(), + E = Formulae.end(); I != E; ++I) { + const Formula &F = *I; if (F.ScaledReg) Regs.insert(F.ScaledReg); Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); } @@ -1033,13 +1239,16 @@ void LSRUse::print(raw_ostream &OS) const { for (SmallVectorImpl::const_iterator I = Offsets.begin(), E = Offsets.end(); I != E; ++I) { OS << *I; - if (next(I) != E) + if (llvm::next(I) != E) OS << ','; } OS << '}'; if (AllFixupsOutsideLoop) OS << ", all-fixups-outside-loop"; + + if (WidestFixupType) + OS << ", widest fixup type: " << *WidestFixupType; } void LSRUse::dump() const { @@ -1050,7 +1259,7 @@ void LSRUse::dump() const { /// be completely folded into the user instruction at isel time. This includes /// address-mode folding and special icmp tricks. static bool isLegalUse(const TargetLowering::AddrMode &AM, - LSRUse::KindType Kind, const Type *AccessTy, + LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI) { switch (Kind) { case LSRUse::Address: @@ -1079,10 +1288,19 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM, // If we have low-level target information, ask the target if it can fold an // integer immediate on an icmp. if (AM.BaseOffs != 0) { - if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs); - return false; + if (!TLI) + return false; + // We have one of: + // ICmpZero BaseReg + Offset => ICmp BaseReg, -Offset + // ICmpZero -1*ScaleReg + Offset => ICmp ScaleReg, Offset + // Offs is the ICmp immediate. 
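      // A minimal sketch of why the negation below goes through uint64_t
      // (plain standard C++; no LLVM types assumed): signed negation of
      // INT64_MIN is undefined behavior, so the offset is negated in the
      // unsigned domain, where wraparound is well defined:
      //   int64_t Offs = INT64_MIN;   // -2^63
      //   Offs = -(uint64_t)Offs;     // 2^63 wraps back to INT64_MIN
      // For every other value, -(uint64_t)Offs converts back to the ordinary
      // two's-complement negation, so the immediate handed to
      // isLegalICmpImmediate matches the "ICmp BaseReg, -Offset" form above.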
+ int64_t Offs = AM.BaseOffs; + if (AM.Scale == 0) + Offs = -(uint64_t)Offs; // The cast does the right thing with INT64_MIN. + return TLI->isLegalICmpImmediate(Offs); } + // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg return true; case LSRUse::Basic: @@ -1094,12 +1312,12 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM, return AM.Scale == 0 || AM.Scale == -1; } - return false; + llvm_unreachable("Invalid LSRUse Kind!"); } static bool isLegalUse(TargetLowering::AddrMode AM, int64_t MinOffset, int64_t MaxOffset, - LSRUse::KindType Kind, const Type *AccessTy, + LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI) { // Check for overflow. if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) != @@ -1121,7 +1339,7 @@ static bool isLegalUse(TargetLowering::AddrMode AM, static bool isAlwaysFoldable(int64_t BaseOffs, GlobalValue *BaseGV, bool HasBaseReg, - LSRUse::KindType Kind, const Type *AccessTy, + LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI) { // Fast-path: zero is always foldable. if (BaseOffs == 0 && !BaseGV) return true; @@ -1134,13 +1352,20 @@ static bool isAlwaysFoldable(int64_t BaseOffs, AM.HasBaseReg = HasBaseReg; AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1; + // Canonicalize a scale of 1 to a base register if the formula doesn't + // already have a base register. + if (!AM.HasBaseReg && AM.Scale == 1) { + AM.Scale = 0; + AM.HasBaseReg = true; + } + return isLegalUse(AM, Kind, AccessTy, TLI); } static bool isAlwaysFoldable(const SCEV *S, int64_t MinOffset, int64_t MaxOffset, bool HasBaseReg, - LSRUse::KindType Kind, const Type *AccessTy, + LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI, ScalarEvolution &SE) { // Fast-path: zero is always foldable. @@ -1168,30 +1393,94 @@ static bool isAlwaysFoldable(const SCEV *S, return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI); } -/// FormulaSorter - This class implements an ordering for formulae which sorts -/// the by their standalone cost. -class FormulaSorter { - /// These two sets are kept empty, so that we compute standalone costs. - DenseSet VisitedRegs; - SmallPtrSet Regs; - Loop *L; - LSRUse *LU; - ScalarEvolution &SE; - DominatorTree &DT; +namespace { -public: - FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt) - : L(l), LU(&lu), SE(se), DT(dt) {} - - bool operator()(const Formula &A, const Formula &B) { - Cost CostA; - CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT); - Regs.clear(); - Cost CostB; - CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT); - Regs.clear(); - return CostA < CostB; +/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding +/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind. +struct UseMapDenseMapInfo { + static std::pair getEmptyKey() { + return std::make_pair(reinterpret_cast(-1), LSRUse::Basic); + } + + static std::pair getTombstoneKey() { + return std::make_pair(reinterpret_cast(-2), LSRUse::Basic); + } + + static unsigned + getHashValue(const std::pair &V) { + unsigned Result = DenseMapInfo::getHashValue(V.first); + Result ^= DenseMapInfo::getHashValue(unsigned(V.second)); + return Result; } + + static bool isEqual(const std::pair &LHS, + const std::pair &RHS) { + return LHS == RHS; + } +}; + +/// IVInc - An individual increment in a Chain of IV increments. +/// Relate an IV user to an expression that computes the IV it uses from the IV +/// used by the previous link in the Chain. 
+/// +/// For the head of a chain, IncExpr holds the absolute SCEV expression for the +/// original IVOperand. The head of the chain's IVOperand is only valid during +/// chain collection, before LSR replaces IV users. During chain generation, +/// IncExpr can be used to find the new IVOperand that computes the same +/// expression. +struct IVInc { + Instruction *UserInst; + Value* IVOperand; + const SCEV *IncExpr; + + IVInc(Instruction *U, Value *O, const SCEV *E): + UserInst(U), IVOperand(O), IncExpr(E) {} +}; + +// IVChain - The list of IV increments in program order. +// We typically add the head of a chain without finding subsequent links. +struct IVChain { + SmallVector Incs; + const SCEV *ExprBase; + + IVChain() : ExprBase(0) {} + + IVChain(const IVInc &Head, const SCEV *Base) + : Incs(1, Head), ExprBase(Base) {} + + typedef SmallVectorImpl::const_iterator const_iterator; + + // begin - return the first increment in the chain. + const_iterator begin() const { + assert(!Incs.empty()); + return llvm::next(Incs.begin()); + } + const_iterator end() const { + return Incs.end(); + } + + // hasIncs - Returns true if this chain contains any increments. + bool hasIncs() const { return Incs.size() >= 2; } + + // add - Add an IVInc to the end of this chain. + void add(const IVInc &X) { Incs.push_back(X); } + + // tailUserInst - Returns the last UserInst in the chain. + Instruction *tailUserInst() const { return Incs.back().UserInst; } + + // isProfitableIncrement - Returns true if IncExpr can be profitably added to + // this chain. + bool isProfitableIncrement(const SCEV *OperExpr, + const SCEV *IncExpr, + ScalarEvolution&); +}; + +/// ChainUsers - Helper for CollectChains to track multiple IV increment uses. +/// Distinguish between FarUsers that definitely cross IV increments and +/// NearUsers that may be used between IV increments. +struct ChainUsers { + SmallPtrSet FarUsers; + SmallPtrSet NearUsers; }; /// LSRInstance - This class holds state for the main loop strength reduction @@ -1215,7 +1504,7 @@ class LSRInstance { SmallSetVector Factors; /// Types - Interesting use types, to facilitate truncation reuse. - SmallSetVector Types; + SmallSetVector Types; /// Fixups - The list of operands which are to be replaced. SmallVector Fixups; @@ -1226,10 +1515,28 @@ class LSRInstance { /// RegUses - Track which uses use which register candidates. RegUseTracker RegUses; + // Limit the number of chains to avoid quadratic behavior. We don't expect to + // have more than a few IV increment chains in a loop. Missing a Chain falls + // back to normal LSR behavior for those uses. + static const unsigned MaxChains = 8; + + /// IVChainVec - IV users can form a chain of IV increments. + SmallVector IVChainVec; + + /// IVIncSet - IV users that belong to profitable IVChains. + SmallPtrSet IVIncSet; + void OptimizeShadowIV(); bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); - bool OptimizeLoopTermCond(); + void OptimizeLoopTermCond(); + + void ChainInstruction(Instruction *UserInst, Instruction *IVOper, + SmallVectorImpl &ChainUsersVec); + void FinalizeChain(IVChain &Chain); + void CollectChains(); + void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, + SmallVectorImpl &DeadInsts); void CollectInterestingTypesAndFactors(); void CollectFixupsAndInitialFormulae(); @@ -1240,17 +1547,22 @@ class LSRInstance { } // Support for sharing of LSRUses between LSRFixups. 
- typedef DenseMap UseMapTy; + typedef DenseMap, + size_t, + UseMapDenseMapInfo> UseMapTy; UseMapTy UseMap; - bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, - LSRUse::KindType Kind, const Type *AccessTy); + bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, + LSRUse::KindType Kind, Type *AccessTy); std::pair getUse(const SCEV *&Expr, LSRUse::KindType Kind, - const Type *AccessTy); + Type *AccessTy); + + void DeleteUse(LSRUse &LU, size_t LUIdx); + + LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); -public: void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void CountRegisters(const Formula &F, size_t LUIdx); @@ -1272,6 +1584,10 @@ public: void FilterOutUndesirableDedicatedRegisters(); size_t EstimateSearchSpaceComplexity() const; + void NarrowSearchSpaceByDetectingSupersets(); + void NarrowSearchSpaceByCollapsingUnrolledCode(); + void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); + void NarrowSearchSpaceByPickingWinnerRegs(); void NarrowSearchSpaceUsingHeuristics(); void SolveRecurse(SmallVectorImpl &Solution, @@ -1285,9 +1601,11 @@ public: BasicBlock::iterator HoistInsertPosition(BasicBlock::iterator IP, const SmallVectorImpl &Inputs) const; - BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP, - const LSRFixup &LF, - const LSRUse &LU) const; + BasicBlock::iterator + AdjustInsertPositionForExpand(BasicBlock::iterator IP, + const LSRFixup &LF, + const LSRUse &LU, + SCEVExpander &Rewriter) const; Value *Expand(const LSRFixup &LF, const Formula &F, @@ -1307,6 +1625,7 @@ public: void ImplementSolution(const SmallVectorImpl &Solution, Pass *P); +public: LSRInstance(const TargetLowering *tli, Loop *l, Pass *P); bool getChanged() const { return Changed; } @@ -1332,7 +1651,8 @@ void LSRInstance::OptimizeShadowIV() { IVUsers::const_iterator CandidateUI = UI; ++UI; Instruction *ShadowUse = CandidateUI->getUser(); - const Type *DestTy = NULL; + Type *DestTy = NULL; + bool IsSigned = false; /* If shadow use is a int->float cast then insert a second IV to eliminate this cast. @@ -1346,10 +1666,14 @@ void LSRInstance::OptimizeShadowIV() { for (unsigned i = 0; i < n; ++i, ++d) foo(d); */ - if (UIToFPInst *UCast = dyn_cast(CandidateUI->getUser())) + if (UIToFPInst *UCast = dyn_cast(CandidateUI->getUser())) { + IsSigned = false; DestTy = UCast->getDestTy(); - else if (SIToFPInst *SCast = dyn_cast(CandidateUI->getUser())) + } + else if (SIToFPInst *SCast = dyn_cast(CandidateUI->getUser())) { + IsSigned = true; DestTy = SCast->getDestTy(); + } if (!DestTy) continue; if (TLI) { @@ -1363,7 +1687,7 @@ void LSRInstance::OptimizeShadowIV() { if (!PH) continue; if (PH->getNumIncomingValues() != 2) continue; - const Type *SrcTy = PH->getType(); + Type *SrcTy = PH->getType(); int Mantissa = DestTy->getFPMantissaWidth(); if (Mantissa == -1) continue; if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa) @@ -1380,7 +1704,9 @@ void LSRInstance::OptimizeShadowIV() { ConstantInt *Init = dyn_cast(PH->getIncomingValue(Entry)); if (!Init) continue; - Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue()); + Constant *NewInit = ConstantFP::get(DestTy, IsSigned ? + (double)Init->getSExtValue() : + (double)Init->getZExtValue()); BinaryOperator *Incr = dyn_cast(PH->getIncomingValue(Latch)); @@ -1405,7 +1731,7 @@ void LSRInstance::OptimizeShadowIV() { if (!C->getValue().isStrictlyPositive()) continue; /* Add new PHINode. 
*/ - PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH); + PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH); /* create new increment. '++d' in above example. */ Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); @@ -1420,6 +1746,7 @@ void LSRInstance::OptimizeShadowIV() { /* Remove cast operation */ ShadowUse->replaceAllUsesWith(NewPH); ShadowUse->eraseFromParent(); + Changed = true; break; } } @@ -1427,8 +1754,7 @@ void LSRInstance::OptimizeShadowIV() { /// FindIVUserForCond - If Cond has an operand that is an expression of an IV, /// set the IV user and stride information and return true, otherwise return /// false. -bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, - IVStrideUse *&CondUse) { +bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) { for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) if (UI->getUser() == Cond) { // NOTE: we could handle setcc instructions with multiple uses here, but @@ -1505,7 +1831,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); // Add one to the backedge-taken count to get the trip count. - const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One); + const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); if (IterationCount != SE.getSCEV(Sel)) return Cond; // Check for a max calculation that matches the pattern. There's no check @@ -1574,8 +1900,11 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { NewRHS = Sel->getOperand(1); else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) NewRHS = Sel->getOperand(2); + else if (const SCEVUnknown *SU = dyn_cast(MaxRHS)) + NewRHS = SU->getValue(); else - llvm_unreachable("Max doesn't match expected pattern!"); + // Max doesn't match expected pattern. + return Cond; // Determine the new comparison opcode. It may be signed or unsigned, // and the original comparison may be either equality or inequality. @@ -1600,7 +1929,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { /// OptimizeLoopTermCond - Change loop terminating condition to use the /// postinc iv when possible. -bool +void LSRInstance::OptimizeLoopTermCond() { SmallPtrSet PostIncs; @@ -1666,22 +1995,22 @@ LSRInstance::OptimizeLoopTermCond() { } if (const SCEVConstant *D = dyn_cast_or_null(getExactSDiv(B, A, SE))) { + const ConstantInt *C = D->getValue(); // Stride of one or negative one can have reuse with non-addresses. - if (D->getValue()->isOne() || - D->getValue()->isAllOnesValue()) + if (C->isOne() || C->isAllOnesValue()) goto decline_post_inc; // Avoid weird situations. - if (D->getValue()->getValue().getMinSignedBits() >= 64 || - D->getValue()->getValue().isMinSignedValue()) + if (C->getValue().getMinSignedBits() >= 64 || + C->getValue().isMinSignedValue()) goto decline_post_inc; // Without TLI, assume that any stride might be valid, and so any // use might be shared. if (!TLI) goto decline_post_inc; // Check for possible scaled-address reuse. 
- const Type *AccessTy = getAccessType(UI->getUser()); + Type *AccessTy = getAccessType(UI->getUser()); TargetLowering::AddrMode AM; - AM.Scale = D->getValue()->getSExtValue(); + AM.Scale = C->getSExtValue(); if (TLI->isLegalAddressingMode(AM, AccessTy)) goto decline_post_inc; AM.Scale = -AM.Scale; @@ -1736,16 +2065,17 @@ LSRInstance::OptimizeLoopTermCond() { else if (BB != IVIncInsertPos->getParent()) IVIncInsertPos = BB->getTerminator(); } - - return Changed; } +/// reconcileNewOffset - Determine if the given use can accommodate a fixup +/// at the given offset and other details. If so, update the use and +/// return true. bool -LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, - LSRUse::KindType Kind, const Type *AccessTy) { +LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, + LSRUse::KindType Kind, Type *AccessTy) { int64_t NewMinOffset = LU.MinOffset; int64_t NewMaxOffset = LU.MaxOffset; - const Type *NewAccessTy = AccessTy; + Type *NewAccessTy = AccessTy; // Check for a mismatched kind. It's tempting to collapse mismatched kinds to // something conservative, however this can pessimize in the case that one of @@ -1754,17 +2084,19 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, return false; // Conservatively assume HasBaseReg is true for now. if (NewOffset < LU.MinOffset) { - if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true, + if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg, Kind, AccessTy, TLI)) return false; NewMinOffset = NewOffset; } else if (NewOffset > LU.MaxOffset) { - if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true, + if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg, Kind, AccessTy, TLI)) return false; NewMaxOffset = NewOffset; } // Check for a mismatched access type, and fall back conservatively as needed. + // TODO: Be less conservative when the type is similar and can use the same + // addressing modes. if (Kind == LSRUse::Address && AccessTy != LU.AccessTy) NewAccessTy = Type::getVoidTy(AccessTy->getContext()); @@ -1782,7 +2114,7 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, /// Either reuse an existing use or create a new one, as needed. std::pair LSRInstance::getUse(const SCEV *&Expr, - LSRUse::KindType Kind, const Type *AccessTy) { + LSRUse::KindType Kind, Type *AccessTy) { const SCEV *Copy = Expr; int64_t Offset = ExtractImmediate(Expr, SE); @@ -1793,12 +2125,12 @@ LSRInstance::getUse(const SCEV *&Expr, } std::pair P = - UseMap.insert(std::make_pair(Expr, 0)); + UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0)); if (!P.second) { // A use already existed with this base. size_t LUIdx = P.first->second; LSRUse &LU = Uses[LUIdx]; - if (reconcileNewOffset(LU, Offset, Kind, AccessTy)) + if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy)) // Reuse this use. return std::make_pair(LUIdx, Offset); } @@ -1819,6 +2151,60 @@ LSRInstance::getUse(const SCEV *&Expr, return std::make_pair(LUIdx, Offset); } +/// DeleteUse - Delete the given use from the Uses list. +void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) { + if (&LU != &Uses.back()) + std::swap(LU, Uses.back()); + Uses.pop_back(); + + // Update RegUses. + RegUses.SwapAndDropUse(LUIdx, Uses.size()); +} + +/// FindUseWithFormula - Look for a use distinct from OrigLU which is has +/// a formula that has the same registers as the given formula. 
+LSRUse * +LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, + const LSRUse &OrigLU) { + // Search all uses for the formula. This could be more clever. + for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { + LSRUse &LU = Uses[LUIdx]; + // Check whether this use is close enough to OrigLU, to see whether it's + // worthwhile looking through its formulae. + // Ignore ICmpZero uses because they may contain formulae generated by + // GenerateICmpZeroScales, in which case adding fixup offsets may + // be invalid. + if (&LU != &OrigLU && + LU.Kind != LSRUse::ICmpZero && + LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && + LU.WidestFixupType == OrigLU.WidestFixupType && + LU.HasFormulaWithSameRegs(OrigF)) { + // Scan through this use's formulae. + for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), + E = LU.Formulae.end(); I != E; ++I) { + const Formula &F = *I; + // Check to see if this formula has the same registers and symbols + // as OrigF. + if (F.BaseRegs == OrigF.BaseRegs && + F.ScaledReg == OrigF.ScaledReg && + F.AM.BaseGV == OrigF.AM.BaseGV && + F.AM.Scale == OrigF.AM.Scale && + F.UnfoldedOffset == OrigF.UnfoldedOffset) { + if (F.AM.BaseOffs == 0) + return &LU; + // This is the formula where all the registers and symbols matched; + // there aren't going to be any others. Since we declined it, we + // can skip the rest of the formulae and proceed to the next LSRUse. + break; + } + } + } + } + + // Nothing looked good. + return 0; +} + void LSRInstance::CollectInterestingTypesAndFactors() { SmallSetVector Strides; @@ -1835,10 +2221,11 @@ void LSRInstance::CollectInterestingTypesAndFactors() { do { const SCEV *S = Worklist.pop_back_val(); if (const SCEVAddRecExpr *AR = dyn_cast(S)) { - Strides.insert(AR->getStepRecurrence(SE)); + if (AR->getLoop() == L) + Strides.insert(AR->getStepRecurrence(SE)); Worklist.push_back(AR->getStart()); } else if (const SCEVAddExpr *Add = dyn_cast(S)) { - Worklist.insert(Worklist.end(), Add->op_begin(), Add->op_end()); + Worklist.append(Add->op_begin(), Add->op_end()); } } while (!Worklist.empty()); } @@ -1847,7 +2234,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() { for (SmallSetVector::const_iterator I = Strides.begin(), E = Strides.end(); I != E; ++I) for (SmallSetVector::const_iterator NewStrideIter = - next(I); NewStrideIter != E; ++NewStrideIter) { + llvm::next(I); NewStrideIter != E; ++NewStrideIter) { const SCEV *OldStride = *I; const SCEV *NewStride = *NewStrideIter; @@ -1881,16 +2268,547 @@ void LSRInstance::CollectInterestingTypesAndFactors() { DEBUG(print_factors_and_types(dbgs())); } +/// findIVOperand - Helper for CollectChains that finds an IV operand (computed +/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped +/// Instructions to IVStrideUses, we could partially skip this. +static User::op_iterator +findIVOperand(User::op_iterator OI, User::op_iterator OE, + Loop *L, ScalarEvolution &SE) { + for(; OI != OE; ++OI) { + if (Instruction *Oper = dyn_cast(*OI)) { + if (!SE.isSCEVable(Oper->getType())) + continue; + + if (const SCEVAddRecExpr *AR = + dyn_cast(SE.getSCEV(Oper))) { + if (AR->getLoop() == L) + break; + } + } + } + return OI; +} + +/// getWideOperand - IVChain logic must consistenctly peek base TruncInst +/// operands, so wrap it in a convenient helper. 
+static Value *getWideOperand(Value *Oper) { + if (TruncInst *Trunc = dyn_cast(Oper)) + return Trunc->getOperand(0); + return Oper; +} + +/// isCompatibleIVType - Return true if we allow an IV chain to include both +/// types. +static bool isCompatibleIVType(Value *LVal, Value *RVal) { + Type *LType = LVal->getType(); + Type *RType = RVal->getType(); + return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy()); +} + +/// getExprBase - Return an approximation of this SCEV expression's "base", or +/// NULL for any constant. Returning the expression itself is +/// conservative. Returning a deeper subexpression is more precise and valid as +/// long as it isn't less complex than another subexpression. For expressions +/// involving multiple unscaled values, we need to return the pointer-type +/// SCEVUnknown. This avoids forming chains across objects, such as: +/// PrevOper==a[i], IVOper==b[i], IVInc==b-a. +/// +/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost +/// SCEVUnknown, we simply return the rightmost SCEV operand. +static const SCEV *getExprBase(const SCEV *S) { + switch (S->getSCEVType()) { + default: // uncluding scUnknown. + return S; + case scConstant: + return 0; + case scTruncate: + return getExprBase(cast(S)->getOperand()); + case scZeroExtend: + return getExprBase(cast(S)->getOperand()); + case scSignExtend: + return getExprBase(cast(S)->getOperand()); + case scAddExpr: { + // Skip over scaled operands (scMulExpr) to follow add operands as long as + // there's nothing more complex. + // FIXME: not sure if we want to recognize negation. + const SCEVAddExpr *Add = cast(S); + for (std::reverse_iterator I(Add->op_end()), + E(Add->op_begin()); I != E; ++I) { + const SCEV *SubExpr = *I; + if (SubExpr->getSCEVType() == scAddExpr) + return getExprBase(SubExpr); + + if (SubExpr->getSCEVType() != scMulExpr) + return SubExpr; + } + return S; // all operands are scaled, be conservative. + } + case scAddRecExpr: + return getExprBase(cast(S)->getStart()); + } +} + +/// Return true if the chain increment is profitable to expand into a loop +/// invariant value, which may require its own register. A profitable chain +/// increment will be an offset relative to the same base. We allow such offsets +/// to potentially be used as chain increment as long as it's not obviously +/// expensive to expand using real instructions. +bool IVChain::isProfitableIncrement(const SCEV *OperExpr, + const SCEV *IncExpr, + ScalarEvolution &SE) { + // Aggressively form chains when -stress-ivchain. + if (StressIVChain) + return true; + + // Do not replace a constant offset from IV head with a nonconstant IV + // increment. + if (!isa(IncExpr)) { + const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand)); + if (isa(SE.getMinusSCEV(OperExpr, HeadExpr))) + return 0; + } + + SmallPtrSet Processed; + return !isHighCostExpansion(IncExpr, Processed, SE); +} + +/// Return true if the number of registers needed for the chain is estimated to +/// be less than the number required for the individual IV users. First prohibit +/// any IV users that keep the IV live across increments (the Users set should +/// be empty). Next count the number and type of increments in the chain. +/// +/// Chaining IVs can lead to considerable code bloat if ISEL doesn't +/// effectively use postinc addressing modes. Only consider it profitable it the +/// increments can be computed in fewer registers when chained. +/// +/// TODO: Consider IVInc free if it's already used in another chains. 
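// A rough reading of the cost model implemented below (illustrative only):
// the chain starts at cost 1 for the register holding the chained value.
// Closing the chain at a header phi whose SCEV matches the head expression
// subtracts 1 (the original IV likely no longer needs its own register), two
// or more constant increments subtract 1, each new loop-invariant increment
// value adds 1, and each increment that repeats the previous one subtracts 1.
// For example, a chain of constant offsets from one base that ends at the
// loop phi scores 1 - 1 - 1 = -1 and is kept, while a chain that must
// materialize several distinct invariant increments scores positive and is
// dropped.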
+static bool +isProfitableChain(IVChain &Chain, SmallPtrSet &Users, + ScalarEvolution &SE, const TargetLowering *TLI) { + if (StressIVChain) + return true; + + if (!Chain.hasIncs()) + return false; + + if (!Users.empty()) { + DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; + for (SmallPtrSet::const_iterator I = Users.begin(), + E = Users.end(); I != E; ++I) { + dbgs() << " " << **I << "\n"; + }); + return false; + } + assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); + + // The chain itself may require a register, so intialize cost to 1. + int cost = 1; + + // A complete chain likely eliminates the need for keeping the original IV in + // a register. LSR does not currently know how to form a complete chain unless + // the header phi already exists. + if (isa(Chain.tailUserInst()) + && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { + --cost; + } + const SCEV *LastIncExpr = 0; + unsigned NumConstIncrements = 0; + unsigned NumVarIncrements = 0; + unsigned NumReusedIncrements = 0; + for (IVChain::const_iterator I = Chain.begin(), E = Chain.end(); + I != E; ++I) { + + if (I->IncExpr->isZero()) + continue; + + // Incrementing by zero or some constant is neutral. We assume constants can + // be folded into an addressing mode or an add's immediate operand. + if (isa(I->IncExpr)) { + ++NumConstIncrements; + continue; + } + + if (I->IncExpr == LastIncExpr) + ++NumReusedIncrements; + else + ++NumVarIncrements; + + LastIncExpr = I->IncExpr; + } + // An IV chain with a single increment is handled by LSR's postinc + // uses. However, a chain with multiple increments requires keeping the IV's + // value live longer than it needs to be if chained. + if (NumConstIncrements > 1) + --cost; + + // Materializing increment expressions in the preheader that didn't exist in + // the original code may cost a register. For example, sign-extended array + // indices can produce ridiculous increments like this: + // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) + cost += NumVarIncrements; + + // Reusing variable increments likely saves a register to hold the multiple of + // the stride. + cost -= NumReusedIncrements; + + DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost + << "\n"); + + return cost < 0; +} + +/// ChainInstruction - Add this IV user to an existing chain or make it the head +/// of a new chain. +void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, + SmallVectorImpl &ChainUsersVec) { + // When IVs are used as types of varying widths, they are generally converted + // to a wider type with some uses remaining narrow under a (free) trunc. + Value *const NextIV = getWideOperand(IVOper); + const SCEV *const OperExpr = SE.getSCEV(NextIV); + const SCEV *const OperExprBase = getExprBase(OperExpr); + + // Visit all existing chains. Check if its IVOper can be computed as a + // profitable loop invariant increment from the last link in the Chain. + unsigned ChainIdx = 0, NChains = IVChainVec.size(); + const SCEV *LastIncExpr = 0; + for (; ChainIdx < NChains; ++ChainIdx) { + IVChain &Chain = IVChainVec[ChainIdx]; + + // Prune the solution space aggressively by checking that both IV operands + // are expressions that operate on the same unscaled SCEVUnknown. This + // "base" will be canceled by the subsequent getMinusSCEV call. Checking + // first avoids creating extra SCEV expressions. 
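    // A hedged example of the pruning below (names are illustrative, not from
    // the patch): if the chain's previous operand is something like &A[i] and
    // the candidate operand is &A[i+4], getExprBase reduces both to the
    // SCEVUnknown for A, so the ExprBase test passes and the later
    // getMinusSCEV folds to the constant byte offset between them. If the
    // candidate were instead rooted at a different object, say &B[i], the
    // bases differ and the operand is skipped before any subtraction is
    // formed.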
+ if (!StressIVChain && Chain.ExprBase != OperExprBase) + continue; + + Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); + if (!isCompatibleIVType(PrevIV, NextIV)) + continue; + + // A phi node terminates a chain. + if (isa(UserInst) && isa(Chain.tailUserInst())) + continue; + + // The increment must be loop-invariant so it can be kept in a register. + const SCEV *PrevExpr = SE.getSCEV(PrevIV); + const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); + if (!SE.isLoopInvariant(IncExpr, L)) + continue; + + if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { + LastIncExpr = IncExpr; + break; + } + } + // If we haven't found a chain, create a new one, unless we hit the max. Don't + // bother for phi nodes, because they must be last in the chain. + if (ChainIdx == NChains) { + if (isa(UserInst)) + return; + if (NChains >= MaxChains && !StressIVChain) { + DEBUG(dbgs() << "IV Chain Limit\n"); + return; + } + LastIncExpr = OperExpr; + // IVUsers may have skipped over sign/zero extensions. We don't currently + // attempt to form chains involving extensions unless they can be hoisted + // into this loop's AddRec. + if (!isa(LastIncExpr)) + return; + ++NChains; + IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), + OperExprBase)); + ChainUsersVec.resize(NChains); + DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst + << ") IV=" << *LastIncExpr << "\n"); + } else { + DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst + << ") IV+" << *LastIncExpr << "\n"); + // Add this IV user to the end of the chain. + IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); + } + + SmallPtrSet &NearUsers = ChainUsersVec[ChainIdx].NearUsers; + // This chain's NearUsers become FarUsers. + if (!LastIncExpr->isZero()) { + ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), + NearUsers.end()); + NearUsers.clear(); + } + + // All other uses of IVOperand become near uses of the chain. + // We currently ignore intermediate values within SCEV expressions, assuming + // they will eventually be used be the current chain, or can be computed + // from one of the chain increments. To be more precise we could + // transitively follow its user and only add leaf IV users to the set. + for (Value::use_iterator UseIter = IVOper->use_begin(), + UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) { + Instruction *OtherUse = dyn_cast(*UseIter); + if (!OtherUse || OtherUse == UserInst) + continue; + if (SE.isSCEVable(OtherUse->getType()) + && !isa(SE.getSCEV(OtherUse)) + && IU.isIVUserOrOperand(OtherUse)) { + continue; + } + NearUsers.insert(OtherUse); + } + + // Since this user is part of the chain, it's no longer considered a use + // of the chain. + ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); +} + +/// CollectChains - Populate the vector of Chains. +/// +/// This decreases ILP at the architecture level. Targets with ample registers, +/// multiple memory ports, and no register renaming probably don't want +/// this. However, such targets should probably disable LSR altogether. +/// +/// The job of LSR is to make a reasonable choice of induction variables across +/// the loop. Subsequent passes can easily "unchain" computation exposing more +/// ILP *within the loop* if the target wants it. +/// +/// Finding the best IV chain is potentially a scheduling problem. Since LSR +/// will not reorder memory operations, it will recognize this as a chain, but +/// will generate redundant IV increments. 
Ideally this would be corrected later +/// by a smart scheduler: +/// = A[i] +/// = A[i+x] +/// A[i] = +/// A[i+x] = +/// +/// TODO: Walk the entire domtree within this loop, not just the path to the +/// loop latch. This will discover chains on side paths, but requires +/// maintaining multiple copies of the Chains state. +void LSRInstance::CollectChains() { + DEBUG(dbgs() << "Collecting IV Chains.\n"); + SmallVector ChainUsersVec; + + SmallVector LatchPath; + BasicBlock *LoopHeader = L->getHeader(); + for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); + Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { + LatchPath.push_back(Rung->getBlock()); + } + LatchPath.push_back(LoopHeader); + + // Walk the instruction stream from the loop header to the loop latch. + for (SmallVectorImpl::reverse_iterator + BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend(); + BBIter != BBEnd; ++BBIter) { + for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); + I != E; ++I) { + // Skip instructions that weren't seen by IVUsers analysis. + if (isa(I) || !IU.isIVUserOrOperand(I)) + continue; + + // Ignore users that are part of a SCEV expression. This way we only + // consider leaf IV Users. This effectively rediscovers a portion of + // IVUsers analysis but in program order this time. + if (SE.isSCEVable(I->getType()) && !isa(SE.getSCEV(I))) + continue; + + // Remove this instruction from any NearUsers set it may be in. + for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); + ChainIdx < NChains; ++ChainIdx) { + ChainUsersVec[ChainIdx].NearUsers.erase(I); + } + // Search for operands that can be chained. + SmallPtrSet UniqueOperands; + User::op_iterator IVOpEnd = I->op_end(); + User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); + while (IVOpIter != IVOpEnd) { + Instruction *IVOpInst = cast(*IVOpIter); + if (UniqueOperands.insert(IVOpInst)) + ChainInstruction(I, IVOpInst, ChainUsersVec); + IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); + } + } // Continue walking down the instructions. + } // Continue walking down the domtree. + // Visit phi backedges to determine if the chain can generate the IV postinc. + for (BasicBlock::iterator I = L->getHeader()->begin(); + PHINode *PN = dyn_cast(I); ++I) { + if (!SE.isSCEVable(PN->getType())) + continue; + + Instruction *IncV = + dyn_cast(PN->getIncomingValueForBlock(L->getLoopLatch())); + if (IncV) + ChainInstruction(PN, IncV, ChainUsersVec); + } + // Remove any unprofitable chains. + unsigned ChainIdx = 0; + for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); + UsersIdx < NChains; ++UsersIdx) { + if (!isProfitableChain(IVChainVec[UsersIdx], + ChainUsersVec[UsersIdx].FarUsers, SE, TLI)) + continue; + // Preserve the chain at UsesIdx. + if (ChainIdx != UsersIdx) + IVChainVec[ChainIdx] = IVChainVec[UsersIdx]; + FinalizeChain(IVChainVec[ChainIdx]); + ++ChainIdx; + } + IVChainVec.resize(ChainIdx); +} + +void LSRInstance::FinalizeChain(IVChain &Chain) { + assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); + DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); + + for (IVChain::const_iterator I = Chain.begin(), E = Chain.end(); + I != E; ++I) { + DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n"); + User::op_iterator UseI = + std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand); + assert(UseI != I->UserInst->op_end() && "cannot find IV operand"); + IVIncSet.insert(UseI); + } +} + +/// Return true if the IVInc can be folded into an addressing mode. 
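// A hedged illustration of what "folded into an addressing mode" means for a
// constant increment; the real test below defers to isAlwaysFoldable and the
// TargetLowering hooks, while this sketch simply assumes a 12-bit signed
// [base + displacement] form.  The helper name is hypothetical.
static bool fitsRegImmDisplacementSketch(int64_t Offset) {
  return Offset >= -2048 && Offset <= 2047;
}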
+static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, + Value *Operand, const TargetLowering *TLI) { + const SCEVConstant *IncConst = dyn_cast(IncExpr); + if (!IncConst || !isAddressUse(UserInst, Operand)) + return false; + + if (IncConst->getValue()->getValue().getMinSignedBits() > 64) + return false; + + int64_t IncOffset = IncConst->getValue()->getSExtValue(); + if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HaseBaseReg=*/false, + LSRUse::Address, getAccessType(UserInst), TLI)) + return false; + + return true; +} + +/// GenerateIVChains - Generate an add or subtract for each IVInc in a chain to +/// materialize the IV user's operand from the previous IV user's operand. +void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, + SmallVectorImpl &DeadInsts) { + // Find the new IVOperand for the head of the chain. It may have been replaced + // by LSR. + const IVInc &Head = Chain.Incs[0]; + User::op_iterator IVOpEnd = Head.UserInst->op_end(); + User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), + IVOpEnd, L, SE); + Value *IVSrc = 0; + while (IVOpIter != IVOpEnd) { + IVSrc = getWideOperand(*IVOpIter); + + // If this operand computes the expression that the chain needs, we may use + // it. (Check this after setting IVSrc which is used below.) + // + // Note that if Head.IncExpr is wider than IVSrc, then this phi is too + // narrow for the chain, so we can no longer use it. We do allow using a + // wider phi, assuming the LSR checked for free truncation. In that case we + // should already have a truncate on this operand such that + // getSCEV(IVSrc) == IncExpr. + if (SE.getSCEV(*IVOpIter) == Head.IncExpr + || SE.getSCEV(IVSrc) == Head.IncExpr) { + break; + } + IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); + } + if (IVOpIter == IVOpEnd) { + // Gracefully give up on this chain. + DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); + return; + } + + DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); + Type *IVTy = IVSrc->getType(); + Type *IntTy = SE.getEffectiveSCEVType(IVTy); + const SCEV *LeftOverExpr = 0; + for (IVChain::const_iterator IncI = Chain.begin(), + IncE = Chain.end(); IncI != IncE; ++IncI) { + + Instruction *InsertPt = IncI->UserInst; + if (isa(InsertPt)) + InsertPt = L->getLoopLatch()->getTerminator(); + + // IVOper will replace the current IV User's operand. IVSrc is the IV + // value currently held in a register. + Value *IVOper = IVSrc; + if (!IncI->IncExpr->isZero()) { + // IncExpr was the result of subtraction of two narrow values, so must + // be signed. + const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy); + LeftOverExpr = LeftOverExpr ? + SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; + } + if (LeftOverExpr && !LeftOverExpr->isZero()) { + // Expand the IV increment. + Rewriter.clearPostInc(); + Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); + const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), + SE.getUnknown(IncV)); + IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); + + // If an IV increment can't be folded, use it as the next IV value. 
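// Illustrative only: the fold-or-materialize behavior implemented below,
// restated with plain integers standing in for SCEV expressions.  All names
// here (applyChainSketch, Use) are hypothetical, and the +/-2047 window
// mirrors the 12-bit displacement assumed in the earlier sketch.
static void applyChainSketch(long Base, const long *Incs, unsigned NumIncs,
                             void (*Use)(long)) {
  long ChainVal = Base;   // plays the role of IVSrc
  long LeftOver = 0;      // plays the role of LeftOverExpr
  for (unsigned i = 0; i != NumIncs; ++i) {
    LeftOver += Incs[i];
    if (LeftOver < -2048 || LeftOver > 2047) {
      ChainVal += LeftOver;   // can't fold: materialize one add and rebase
      LeftOver = 0;
    }
    Use(ChainVal + LeftOver); // the folded remainder stays in the address
  }
}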
+ if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand, + TLI)) { + assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); + IVSrc = IVOper; + LeftOverExpr = 0; + } + } + Type *OperTy = IncI->IVOperand->getType(); + if (IVTy != OperTy) { + assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && + "cannot extend a chained IV"); + IRBuilder<> Builder(InsertPt); + IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); + } + IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper); + DeadInsts.push_back(IncI->IVOperand); + } + // If LSR created a new, wider phi, we may also replace its postinc. We only + // do this if we also found a wide value for the head of the chain. + if (isa(Chain.tailUserInst())) { + for (BasicBlock::iterator I = L->getHeader()->begin(); + PHINode *Phi = dyn_cast(I); ++I) { + if (!isCompatibleIVType(Phi, IVSrc)) + continue; + Instruction *PostIncV = dyn_cast( + Phi->getIncomingValueForBlock(L->getLoopLatch())); + if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) + continue; + Value *IVOper = IVSrc; + Type *PostIncTy = PostIncV->getType(); + if (IVTy != PostIncTy) { + assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); + IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); + Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); + IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); + } + Phi->replaceUsesOfWith(PostIncV, IVOper); + DeadInsts.push_back(PostIncV); + } + } +} + void LSRInstance::CollectFixupsAndInitialFormulae() { for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { + Instruction *UserInst = UI->getUser(); + // Skip IV users that are part of profitable IV Chains. + User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), + UI->getOperandValToReplace()); + assert(UseI != UserInst->op_end() && "cannot find IV operand"); + if (IVIncSet.count(UseI)) + continue; + // Record the uses. LSRFixup &LF = getNewFixup(); - LF.UserInst = UI->getUser(); + LF.UserInst = UserInst; LF.OperandValToReplace = UI->getOperandValToReplace(); LF.PostIncLoops = UI->getPostIncLoops(); LSRUse::KindType Kind = LSRUse::Basic; - const Type *AccessTy = 0; + Type *AccessTy = 0; if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { Kind = LSRUse::Address; AccessTy = getAccessType(LF.UserInst); @@ -1912,11 +2830,17 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { if (NV == LF.OperandValToReplace) { CI->setOperand(1, CI->getOperand(0)); CI->setOperand(0, NV); + NV = CI->getOperand(1); + Changed = true; } // x == y --> x - y == 0 const SCEV *N = SE.getSCEV(NV); - if (N->isLoopInvariant(L)) { + if (SE.isLoopInvariant(N, L)) { + // S is normalized, so normalize N before folding it into S + // to keep the result normalized. + N = TransformForPostIncUse(Normalize, N, CI, 0, + LF.PostIncLoops, SE, DT); Kind = LSRUse::ICmpZero; S = SE.getMinusSCEV(N, S); } @@ -1935,6 +2859,10 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); + if (!LU.WidestFixupType || + SE.getTypeSizeInBits(LU.WidestFixupType) < + SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) + LU.WidestFixupType = LF.OperandValToReplace->getType(); // If this is the first use of this LSRUse, give it a formula. 
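// Source-level picture of the ICmpZero rewrite above (x == y  -->  x - y == 0),
// as two equivalent hypothetical loops: comparing against the loop-invariant
// bound N becomes comparing against zero, so the exit test can share whatever
// strength-reduced IV the solver later picks.  Both functions are
// illustrative, not part of the patch.
static void beforeICmpZero(int *A, long N) {
  for (long I = 0; I != N; ++I)
    A[I] = 0;
}
static void afterICmpZero(int *A, long N) {
  for (long D = 0 - N; D != 0; ++D)
    A[D + N] = 0;     // D = I - N, so the body touches the same elements
}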
if (LU.Formulae.empty()) { @@ -1946,14 +2874,19 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { DEBUG(print_fixups(dbgs())); } +/// InsertInitialFormula - Insert a formula for the given expression into +/// the given use, separating out loop-variant portions from loop-invariant +/// and loop-computable portions. void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { Formula F; - F.InitialMatch(S, L, SE, DT); + F.InitialMatch(S, L, SE); bool Inserted = InsertFormula(LU, LUIdx, F); assert(Inserted && "Initial formula already exists!"); (void)Inserted; } +/// InsertSupplementalFormula - Insert a simple single-register formula for +/// the given expression into the given use. void LSRInstance::InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { @@ -1998,7 +2931,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { const SCEV *S = Worklist.pop_back_val(); if (const SCEVNAryExpr *N = dyn_cast(S)) - Worklist.insert(Worklist.end(), N->op_begin(), N->op_end()); + Worklist.append(N->op_begin(), N->op_end()); else if (const SCEVCastExpr *C = dyn_cast(S)) Worklist.push_back(C->getOperand()); else if (const SCEVUDivExpr *D = dyn_cast(S)) { @@ -2007,8 +2940,12 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { } else if (const SCEVUnknown *U = dyn_cast(S)) { if (!Inserted.insert(U)) continue; const Value *V = U->getValue(); - if (const Instruction *Inst = dyn_cast(V)) + if (const Instruction *Inst = dyn_cast(V)) { + // Look for instructions defined outside the loop. if (L->contains(Inst)) continue; + } else if (isa(V)) + // Undef doesn't have a live range, so it doesn't matter. + continue; for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE; ++UI) { const Instruction *UserInst = dyn_cast(*UI); @@ -2043,7 +2980,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { if (const ICmpInst *ICI = dyn_cast(UserInst)) { unsigned OtherIdx = !UI.getOperandNo(); Value *OtherOp = const_cast(ICI->getOperand(OtherIdx)); - if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L)) + if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) continue; } @@ -2055,6 +2992,10 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); + if (!LU.WidestFixupType || + SE.getTypeSizeInBits(LU.WidestFixupType) < + SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) + LU.WidestFixupType = LF.OperandValToReplace->getType(); InsertSupplementalFormula(U, LU, LF.LUIdx); CountRegisters(LU.Formulae.back(), Uses.size() - 1); break; @@ -2067,20 +3008,24 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() { /// separate registers. If C is non-null, multiply each subexpression by C. static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, SmallVectorImpl &Ops, + const Loop *L, ScalarEvolution &SE) { if (const SCEVAddExpr *Add = dyn_cast(S)) { // Break out add operands. for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); I != E; ++I) - CollectSubexprs(*I, C, Ops, SE); + CollectSubexprs(*I, C, Ops, L, SE); return; } else if (const SCEVAddRecExpr *AR = dyn_cast(S)) { // Split a non-zero base out of an addrec. 
if (!AR->getStart()->isZero()) { CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), AR->getStepRecurrence(SE), - AR->getLoop()), C, Ops, SE); - CollectSubexprs(AR->getStart(), C, Ops, SE); + AR->getLoop(), + //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) + SCEV::FlagAnyWrap), + C, Ops, L, SE); + CollectSubexprs(AR->getStart(), C, Ops, L, SE); return; } } else if (const SCEVMulExpr *Mul = dyn_cast(S)) { @@ -2090,12 +3035,12 @@ static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, dyn_cast(Mul->getOperand(0))) { CollectSubexprs(Mul->getOperand(1), C ? cast(SE.getMulExpr(C, Op0)) : Op0, - Ops, SE); + Ops, L, SE); return; } } - // Otherwise use the value itself. + // Otherwise use the value itself, optionally with a scale applied. Ops.push_back(C ? SE.getMulExpr(C, S) : S); } @@ -2111,11 +3056,18 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, const SCEV *BaseReg = Base.BaseRegs[i]; SmallVector AddOps; - CollectSubexprs(BaseReg, 0, AddOps, SE); + CollectSubexprs(BaseReg, 0, AddOps, L, SE); + if (AddOps.size() == 1) continue; for (SmallVectorImpl::const_iterator J = AddOps.begin(), JE = AddOps.end(); J != JE; ++J) { + + // Loop-variant "unknown" values are uninteresting; we won't be able to + // do anything meaningful with them. + if (isa(*J) && !SE.isLoopInvariant(*J, L)) + continue; + // Don't pull a constant into a register if the constant could be folded // into an immediate field. if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset, @@ -2124,11 +3076,10 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, continue; // Collect all operands except *J. - SmallVector InnerAddOps; - for (SmallVectorImpl::const_iterator K = AddOps.begin(), - KE = AddOps.end(); K != KE; ++K) - if (K != J) - InnerAddOps.push_back(*K); + SmallVector InnerAddOps + (((const SmallVector &)AddOps).begin(), J); + InnerAddOps.append + (llvm::next(J), ((const SmallVector &)AddOps).end()); // Don't leave just a constant behind in a register if the constant could // be folded into an immediate field. @@ -2142,8 +3093,29 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, if (InnerSum->isZero()) continue; Formula F = Base; - F.BaseRegs[i] = InnerSum; - F.BaseRegs.push_back(*J); + + // Add the remaining pieces of the add back into the new formula. + const SCEVConstant *InnerSumSC = dyn_cast(InnerSum); + if (TLI && InnerSumSC && + SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && + TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + + InnerSumSC->getValue()->getZExtValue())) { + F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + + InnerSumSC->getValue()->getZExtValue(); + F.BaseRegs.erase(F.BaseRegs.begin() + i); + } else + F.BaseRegs[i] = InnerSum; + + // Add J as its own register, or an unfolded immediate. + const SCEVConstant *SC = dyn_cast(*J); + if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && + TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + + SC->getValue()->getZExtValue())) + F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + + SC->getValue()->getZExtValue(); + else + F.BaseRegs.push_back(*J); + if (InsertFormula(LU, LUIdx, F)) // If that formula hadn't been seen before, recurse to find more like // it. 
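// A concrete picture of the "unfolded offset" used above, as a hypothetical
// source loop: the address A + 4*I + 4*J + 400 splits into a register for the
// {A,+,4} recurrence, a register for 4*J, and the constant 400, which need
// not occupy a register at all when the target's add-immediate
// (TLI->isLegalAddImmediate) can absorb it.  The function is illustrative.
static int sumWithOffset(const int *A, long J, long N) {
  int S = 0;
  for (long I = 0; I < N; ++I)
    S += A[I + J + 100];   // 100 int elements = 400 bytes of constant offset
  return S;
}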
@@ -2165,8 +3137,8 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, for (SmallVectorImpl::const_iterator I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) { const SCEV *BaseReg = *I; - if (BaseReg->properlyDominates(L->getHeader(), &DT) && - !BaseReg->hasComputableLoopEvolution(L)) + if (SE.properlyDominates(BaseReg, L->getHeader()) && + !SE.hasComputableLoopEvolution(BaseReg, L)) Ops.push_back(BaseReg); else F.BaseRegs.push_back(BaseReg); @@ -2209,7 +3181,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base) { // TODO: For now, just add the min and max offset, because it usually isn't // worthwhile looking at everything inbetween. - SmallVector Worklist; + SmallVector Worklist; Worklist.push_back(LU.MinOffset); if (LU.MaxOffset != LU.MinOffset) Worklist.push_back(LU.MaxOffset); @@ -2223,7 +3195,14 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I; if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind, LU.AccessTy, TLI)) { - F.BaseRegs[i] = SE.getAddExpr(G, SE.getConstant(G->getType(), *I)); + // Add the offset to the base register. + const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G); + // If it cancelled out, drop the base register, otherwise update it. + if (NewG->isZero()) { + std::swap(F.BaseRegs[i], F.BaseRegs.back()); + F.BaseRegs.pop_back(); + } else + F.BaseRegs[i] = NewG; (void)InsertFormula(LU, LUIdx, F); } @@ -2249,7 +3228,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, if (LU.Kind != LSRUse::ICmpZero) return; // Determine the integer type for the base formula. - const Type *IntTy = Base.getType(); + Type *IntTy = Base.getType(); if (!IntTy) return; if (SE.getTypeSizeInBits(IntTy) > 64) return; @@ -2262,13 +3241,12 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, for (SmallSetVector::const_iterator I = Factors.begin(), E = Factors.end(); I != E; ++I) { int64_t Factor = *I; - Formula F = Base; // Check that the multiplication doesn't overflow. - if (F.AM.BaseOffs == INT64_MIN && Factor == -1) + if (Base.AM.BaseOffs == INT64_MIN && Factor == -1) continue; - F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor; - if (F.AM.BaseOffs / Factor != Base.AM.BaseOffs) + int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor; + if (NewBaseOffs / Factor != Base.AM.BaseOffs) continue; // Check that multiplying with the use offset doesn't overflow. @@ -2279,6 +3257,9 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, if (Offset / Factor != LU.MinOffset) continue; + Formula F = Base; + F.AM.BaseOffs = NewBaseOffs; + // Check that this scale is legal. if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI)) continue; @@ -2302,6 +3283,15 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, continue; } + // Check that multiplying with the unfolded offset doesn't overflow. + if (F.UnfoldedOffset != 0) { + if (F.UnfoldedOffset == INT64_MIN && Factor == -1) + continue; + F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; + if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) + continue; + } + // If we make it here and it's legal, add it. (void)InsertFormula(LU, LUIdx, F); next:; @@ -2310,10 +3300,9 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, /// GenerateScales - Generate stride factor reuse formulae by making use of /// scaled-offset address modes, for example. 
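// Elaborating the "for example" above with a hypothetical loop: for 4-byte
// elements the access A[2*I] can use a scaled addressing mode such as
// reg(A) + reg(I)*8, so GenerateScales tries to move a stride factor out of a
// base register and into the formula's scale field when the target's
// addressing modes allow it.  The function below is illustrative only.
static int sumEveryOther(const int *A, long N) {
  int S = 0;
  for (long I = 0; I < N; ++I)
    S += A[2 * I];   // candidate for base + index*8 addressing
  return S;
}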
-void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, - Formula Base) { +void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { // Determine the integer type for the base formula. - const Type *IntTy = Base.getType(); + Type *IntTy = Base.getType(); if (!IntTy) return; // If this Formula already has a scaled register, we can't add another one. @@ -2357,8 +3346,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, // TODO: This could be optimized to avoid all the copying. Formula F = Base; F.ScaledReg = Quotient; - std::swap(F.BaseRegs[i], F.BaseRegs.back()); - F.BaseRegs.pop_back(); + F.DeleteBaseReg(F.BaseRegs[i]); (void)InsertFormula(LU, LUIdx, F); } } @@ -2366,8 +3354,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, } /// GenerateTruncates - Generate reuse formulae from different IV types. -void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, - Formula Base) { +void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { // This requires TargetLowering to tell us which truncates are free. if (!TLI) return; @@ -2375,13 +3362,13 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, if (Base.AM.BaseGV) return; // Determine the integer type for the base formula. - const Type *DstTy = Base.getType(); + Type *DstTy = Base.getType(); if (!DstTy) return; DstTy = SE.getEffectiveSCEVType(DstTy); - for (SmallSetVector::const_iterator + for (SmallSetVector::const_iterator I = Types.begin(), E = Types.end(); I != E; ++I) { - const Type *SrcTy = *I; + Type *SrcTy = *I; if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { Formula F = Base; @@ -2487,7 +3474,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { // other orig regs. ImmMapTy::const_iterator OtherImms[] = { Imms.begin(), prior(Imms.end()), - Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2) + Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2) }; for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { ImmMapTy::const_iterator M = OtherImms[i]; @@ -2518,13 +3505,13 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { int64_t Imm = WI.Imm; const SCEV *OrigReg = WI.OrigReg; - const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); + Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); unsigned BitWidth = SE.getTypeSizeInBits(IntTy); // TODO: Use a more targeted data structure. for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { - Formula F = LU.Formulae[L]; + const Formula &F = LU.Formulae[L]; // Use the immediate in the scaled register. if (F.ScaledReg == OrigReg) { int64_t Offs = (uint64_t)F.AM.BaseOffs + @@ -2544,7 +3531,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { // value to the immediate would produce a value closer to zero than the // immediate itself, then the formula isn't worthwhile. 
if (const SCEVConstant *C = dyn_cast(NewF.ScaledReg)) - if (C->getValue()->getValue().isNegative() != + if (C->getValue()->isNegative() != (NewF.AM.BaseOffs < 0) && (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale)) .ule(abs64(NewF.AM.BaseOffs))) @@ -2561,8 +3548,13 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { Formula NewF = F; NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm; if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, - LU.Kind, LU.AccessTy, TLI)) - continue; + LU.Kind, LU.AccessTy, TLI)) { + if (!TLI || + !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) + continue; + NewF = F; + NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; + } NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); // If the new formula has a constant in a register, and adding the @@ -2619,13 +3611,20 @@ LSRInstance::GenerateAllReuseFormulae() { } GenerateCrossUseConstantOffsets(); + + DEBUG(dbgs() << "\n" + "After generating reuse formulae:\n"; + print_uses(dbgs())); } -/// If their are multiple formulae with the same set of registers used +/// If there are multiple formulae with the same set of registers used /// by other uses, pick the best one and delete the others. void LSRInstance::FilterOutUndesirableDedicatedRegisters() { + DenseSet VisitedRegs; + SmallPtrSet Regs; + SmallPtrSet LoserRegs; #ifndef NDEBUG - bool Changed = false; + bool ChangedFormulae = false; #endif // Collect the best formula for each unique set of shared registers. This @@ -2636,47 +3635,73 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; - FormulaSorter Sorter(L, LU, SE, DT); - DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << "\n"); + DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); bool Any = false; for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms; ++FIdx) { Formula &F = LU.Formulae[FIdx]; - SmallVector Key; - for (SmallVectorImpl::const_iterator J = F.BaseRegs.begin(), - JE = F.BaseRegs.end(); J != JE; ++J) { - const SCEV *Reg = *J; - if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) - Key.push_back(Reg); + // Some formulas are instant losers. For example, they may depend on + // nonexistent AddRecs from other loops. These need to be filtered + // immediately, otherwise heuristics could choose them over others leading + // to an unsatisfactory solution. Passing LoserRegs into RateFormula here + // avoids the need to recompute this information across formulae using the + // same bad AddRec. Passing LoserRegs is also essential unless we remove + // the corresponding bad register from the Regs set. + Cost CostF; + Regs.clear(); + CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT, + &LoserRegs); + if (CostF.isLoser()) { + // During initial formula generation, undesirable formulae are generated + // by uses within other loops that have some non-trivial address mode or + // use the postinc form of the IV. LSR needs to provide these formulae + // as the basis of rediscovering the desired formula that uses an AddRec + // corresponding to the existing phi. Once all formulae have been + // generated, these initial losers may be pruned. + DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); + dbgs() << "\n"); } - if (F.ScaledReg && - RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) - Key.push_back(F.ScaledReg); - // Unstable sort by host order ok, because this is only used for - // uniquifying. 
- std::sort(Key.begin(), Key.end()); - - std::pair P = - BestFormulae.insert(std::make_pair(Key, FIdx)); - if (!P.second) { + else { + SmallVector Key; + for (SmallVectorImpl::const_iterator J = F.BaseRegs.begin(), + JE = F.BaseRegs.end(); J != JE; ++J) { + const SCEV *Reg = *J; + if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) + Key.push_back(Reg); + } + if (F.ScaledReg && + RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) + Key.push_back(F.ScaledReg); + // Unstable sort by host order ok, because this is only used for + // uniquifying. + std::sort(Key.begin(), Key.end()); + + std::pair P = + BestFormulae.insert(std::make_pair(Key, FIdx)); + if (P.second) + continue; + Formula &Best = LU.Formulae[P.first->second]; - if (Sorter.operator()(F, Best)) + + Cost CostBest; + Regs.clear(); + CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT); + if (CostF < CostBest) std::swap(F, Best); DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); dbgs() << "\n" " in favor of formula "; Best.print(dbgs()); dbgs() << '\n'); + } #ifndef NDEBUG - Changed = true; + ChangedFormulae = true; #endif - LU.DeleteFormula(F); - --FIdx; - --NumForms; - Any = true; - continue; - } + LU.DeleteFormula(F); + --FIdx; + --NumForms; + Any = true; } // Now that we've filtered out some formulae, recompute the Regs set. @@ -2687,7 +3712,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { BestFormulae.clear(); } - DEBUG(if (Changed) { + DEBUG(if (ChangedFormulae) { dbgs() << "\n" "After filtering out undesirable candidates:\n"; print_uses(dbgs()); @@ -2702,7 +3727,7 @@ static const size_t ComplexityLimit = UINT16_MAX; /// this many solutions because it prune the search space, but the pruning /// isn't always sufficient. size_t LSRInstance::EstimateSearchSpaceComplexity() const { - uint32_t Power = 1; + size_t Power = 1; for (SmallVectorImpl::const_iterator I = Uses.begin(), E = Uses.end(); I != E; ++I) { size_t FSize = I->Formulae.size(); @@ -2717,11 +3742,178 @@ size_t LSRInstance::EstimateSearchSpaceComplexity() const { return Power; } -/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of -/// formulae to choose from, use some rough heuristics to prune down the number -/// of formulae. This keeps the main solver from taking an extraordinary amount -/// of time in some worst-case scenarios. -void LSRInstance::NarrowSearchSpaceUsingHeuristics() { +/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset +/// of the registers of another formula, it won't help reduce register +/// pressure (though it may not necessarily hurt register pressure); remove +/// it to simplify the system. +void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { + if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { + DEBUG(dbgs() << "The search space is too complex.\n"); + + DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " + "which use a superset of registers used by other " + "formulae.\n"); + + for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { + LSRUse &LU = Uses[LUIdx]; + bool Any = false; + for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { + Formula &F = LU.Formulae[i]; + // Look for a formula with a constant or GV in a register. If the use + // also has a formula with that same value in an immediate field, + // delete the one that uses a register. 
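// Illustrative only: the quantity that the ComplexityLimit guard at the top
// of this function compares against is the saturating product computed by
// EstimateSearchSpaceComplexity; restated here over a plain array of per-use
// formula counts (names are hypothetical).
static size_t estimateComplexitySketch(const size_t *Counts, size_t NumUses) {
  const size_t Limit = 65535;    // mirrors ComplexityLimit (UINT16_MAX)
  size_t Power = 1;
  for (size_t i = 0; i != NumUses; ++i) {
    if (Counts[i] >= Limit)
      return Limit;              // saturate: precision beyond this is unused
    Power *= Counts[i];
    if (Power >= Limit)
      return Limit;
  }
  return Power;
}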
+ for (SmallVectorImpl::const_iterator + I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { + if (const SCEVConstant *C = dyn_cast(*I)) { + Formula NewF = F; + NewF.AM.BaseOffs += C->getValue()->getSExtValue(); + NewF.BaseRegs.erase(NewF.BaseRegs.begin() + + (I - F.BaseRegs.begin())); + if (LU.HasFormulaWithSameRegs(NewF)) { + DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); + LU.DeleteFormula(F); + --i; + --e; + Any = true; + break; + } + } else if (const SCEVUnknown *U = dyn_cast(*I)) { + if (GlobalValue *GV = dyn_cast(U->getValue())) + if (!F.AM.BaseGV) { + Formula NewF = F; + NewF.AM.BaseGV = GV; + NewF.BaseRegs.erase(NewF.BaseRegs.begin() + + (I - F.BaseRegs.begin())); + if (LU.HasFormulaWithSameRegs(NewF)) { + DEBUG(dbgs() << " Deleting "; F.print(dbgs()); + dbgs() << '\n'); + LU.DeleteFormula(F); + --i; + --e; + Any = true; + break; + } + } + } + } + } + if (Any) + LU.RecomputeRegs(LUIdx, RegUses); + } + + DEBUG(dbgs() << "After pre-selection:\n"; + print_uses(dbgs())); + } +} + +/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers +/// for expressions like A, A+1, A+2, etc., allocate a single register for +/// them. +void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { + if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { + DEBUG(dbgs() << "The search space is too complex.\n"); + + DEBUG(dbgs() << "Narrowing the search space by assuming that uses " + "separated by a constant offset will use the same " + "registers.\n"); + + // This is especially useful for unrolled loops. + + for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { + LSRUse &LU = Uses[LUIdx]; + for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), + E = LU.Formulae.end(); I != E; ++I) { + const Formula &F = *I; + if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) { + if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) { + if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs, + /*HasBaseReg=*/false, + LU.Kind, LU.AccessTy)) { + DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); + dbgs() << '\n'); + + LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; + + // Update the relocs to reference the new use. + for (SmallVectorImpl::iterator I = Fixups.begin(), + E = Fixups.end(); I != E; ++I) { + LSRFixup &Fixup = *I; + if (Fixup.LUIdx == LUIdx) { + Fixup.LUIdx = LUThatHas - &Uses.front(); + Fixup.Offset += F.AM.BaseOffs; + // Add the new offset to LUThatHas' offset list. + if (LUThatHas->Offsets.back() != Fixup.Offset) { + LUThatHas->Offsets.push_back(Fixup.Offset); + if (Fixup.Offset > LUThatHas->MaxOffset) + LUThatHas->MaxOffset = Fixup.Offset; + if (Fixup.Offset < LUThatHas->MinOffset) + LUThatHas->MinOffset = Fixup.Offset; + } + DEBUG(dbgs() << "New fixup has offset " + << Fixup.Offset << '\n'); + } + if (Fixup.LUIdx == NumUses-1) + Fixup.LUIdx = LUIdx; + } + + // Delete formulae from the new use which are no longer legal. + bool Any = false; + for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { + Formula &F = LUThatHas->Formulae[i]; + if (!isLegalUse(F.AM, + LUThatHas->MinOffset, LUThatHas->MaxOffset, + LUThatHas->Kind, LUThatHas->AccessTy, TLI)) { + DEBUG(dbgs() << " Deleting "; F.print(dbgs()); + dbgs() << '\n'); + LUThatHas->DeleteFormula(F); + --i; + --e; + Any = true; + } + } + if (Any) + LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); + + // Delete the old use. 
+ DeleteUse(LU, LUIdx); + --LUIdx; + --NumUses; + break; + } + } + } + } + } + + DEBUG(dbgs() << "After pre-selection:\n"; + print_uses(dbgs())); + } +} + +/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call +/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that +/// we've done more filtering, as it may be able to find more formulae to +/// eliminate. +void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ + if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { + DEBUG(dbgs() << "The search space is too complex.\n"); + + DEBUG(dbgs() << "Narrowing the search space by re-filtering out " + "undesirable dedicated registers.\n"); + + FilterOutUndesirableDedicatedRegisters(); + + DEBUG(dbgs() << "After pre-selection:\n"; + print_uses(dbgs())); + } +} + +/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely +/// to be profitable, and then in any use which has any reference to that +/// register, delete all formulae which do not reference that register. +void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { + // With all other options exhausted, loop until the system is simple + // enough to handle. SmallPtrSet Taken; while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { // Ok, we have too many of formulae on our hands to conveniently handle. @@ -2781,6 +3973,17 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() { } } +/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of +/// formulae to choose from, use some rough heuristics to prune down the number +/// of formulae. This keeps the main solver from taking an extraordinary amount +/// of time in some worst-case scenarios. +void LSRInstance::NarrowSearchSpaceUsingHeuristics() { + NarrowSearchSpaceByDetectingSupersets(); + NarrowSearchSpaceByCollapsingUnrolledCode(); + NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); + NarrowSearchSpaceByPickingWinnerRegs(); +} + /// SolveRecurse - This is the recursive solver. void LSRInstance::SolveRecurse(SmallVectorImpl &Solution, Cost &SolutionCost, @@ -2810,24 +4013,29 @@ void LSRInstance::SolveRecurse(SmallVectorImpl &Solution, if (LU.Regs.count(*I)) ReqRegs.insert(*I); - bool AnySatisfiedReqRegs = false; SmallPtrSet NewRegs; Cost NewCost; -retry: for (SmallVectorImpl::const_iterator I = LU.Formulae.begin(), E = LU.Formulae.end(); I != E; ++I) { const Formula &F = *I; // Ignore formulae which do not use any of the required registers. + bool SatisfiedReqReg = true; for (SmallSetVector::const_iterator J = ReqRegs.begin(), JE = ReqRegs.end(); J != JE; ++J) { const SCEV *Reg = *J; if ((!F.ScaledReg || F.ScaledReg != Reg) && std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == - F.BaseRegs.end()) - goto skip; + F.BaseRegs.end()) { + SatisfiedReqReg = false; + break; + } + } + if (!SatisfiedReqReg) { + // If none of the formulae satisfied the required registers, then we could + // clear ReqRegs and try again. Currently, we simply give up in this case. + continue; } - AnySatisfiedReqRegs = true; // Evaluate the cost of the current formula. If it's already worse than // the current best, prune the search at that point. @@ -2843,7 +4051,7 @@ retry: VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); } else { DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); - dbgs() << ". 
Regs:"; + dbgs() << ".\n Regs:"; for (SmallPtrSet::const_iterator I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) dbgs() << ' ' << **I; @@ -2854,18 +4062,11 @@ retry: } Workspace.pop_back(); } - skip:; - } - - // If none of the formulae had all of the required registers, relax the - // constraint so that we don't exclude all formulae. - if (!AnySatisfiedReqRegs) { - assert(!ReqRegs.empty() && "Solver failed even without required registers"); - ReqRegs.clear(); - goto retry; } } +/// Solve - Choose one formula from each use. Return the results in the given +/// Solution vector. void LSRInstance::Solve(SmallVectorImpl &Solution) const { SmallVector Workspace; Cost SolutionCost; @@ -2875,8 +4076,13 @@ void LSRInstance::Solve(SmallVectorImpl &Solution) const { DenseSet VisitedRegs; Workspace.reserve(Uses.size()); + // SolveRecurse does all the work. SolveRecurse(Solution, SolutionCost, Workspace, CurCost, CurRegs, VisitedRegs); + if (Solution.empty()) { + DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); + return; + } // Ok, we've now made all our decisions. DEBUG(dbgs() << "\n" @@ -2890,17 +4096,8 @@ void LSRInstance::Solve(SmallVectorImpl &Solution) const { Solution[i]->print(dbgs()); dbgs() << '\n'; }); -} -/// getImmediateDominator - A handy utility for the specific DominatorTree -/// query that we need here. -/// -static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) { - DomTreeNode *Node = DT.getNode(BB); - if (!Node) return 0; - Node = Node->getIDom(); - if (!Node) return 0; - return Node->getBlock(); + assert(Solution.size() == Uses.size() && "Malformed solution!"); } /// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up @@ -2916,9 +4113,11 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0; BasicBlock *IDom; - for (BasicBlock *Rung = IP->getParent(); ; Rung = IDom) { - IDom = getImmediateDominator(Rung, DT); - if (!IDom) return IP; + for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) { + if (!Rung) return IP; + Rung = Rung->getIDom(); + if (!Rung) return IP; + IDom = Rung->getBlock(); // Don't climb into a loop though. const Loop *IDomLoop = LI.getLoopFor(IDom); @@ -2941,7 +4140,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, // Attempt to find an insert position in the middle of the block, // instead of at the end, so that it can be used for other expansions. if (IDom == Inst->getParent() && - (!BetterPos || DT.dominates(BetterPos, Inst))) + (!BetterPos || !DT.dominates(Inst, BetterPos))) BetterPos = llvm::next(BasicBlock::iterator(Inst)); } if (!AllDominate) @@ -2958,9 +4157,10 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, /// AdjustInsertPositionForExpand - Determine an input position which will be /// dominated by the operands and which will dominate the result. BasicBlock::iterator -LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP, +LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP, const LSRFixup &LF, - const LSRUse &LU) const { + const LSRUse &LU, + SCEVExpander &Rewriter) const { // Collect some instructions which must be dominated by the // expanding replacement. These must be dominated by any operands that // will be required in the expansion. 
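// Simplified sketch of the climb performed by HoistInsertPosition above
// (illustrative; the real code additionally refuses to climb into loops that
// don't contain the insert position and looks for a better in-block spot):
// walk up the immediate dominator chain while every input instruction still
// dominates the candidate block.
static BasicBlock *hoistSketch(BasicBlock *BB,
                               const SmallVectorImpl<Instruction *> &Inputs,
                               DominatorTree &DT) {
  BasicBlock *Best = BB;
  for (DomTreeNode *Rung = DT.getNode(BB); Rung; Rung = Rung->getIDom()) {
    BasicBlock *Cand = Rung->getBlock();
    bool AllDominate = true;
    for (unsigned i = 0, e = Inputs.size(); i != e; ++i)
      if (!DT.dominates(Inputs[i], Cand->getTerminator())) {
        AllDominate = false;
        break;
      }
    if (!AllDominate)
      break;
    Best = Cand;
  }
  return Best;
}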
@@ -2995,19 +4195,33 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP, } } + assert(!isa(LowestIP) && !isa(LowestIP) + && !isa(LowestIP) && + "Insertion point must be a normal instruction"); + // Then, climb up the immediate dominator tree as far as we can go while // still being dominated by the input positions. - IP = HoistInsertPosition(IP, Inputs); + BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); // Don't insert instructions before PHI nodes. while (isa(IP)) ++IP; + // Ignore landingpad instructions. + while (isa(IP)) ++IP; + // Ignore debug intrinsics. while (isa(IP)) ++IP; + // Set IP below instructions recently inserted by SCEVExpander. This keeps the + // IP consistent across expansions and allows the previously inserted + // instructions to be reused by subsequent expansion. + while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP; + return IP; } +/// Expand - Emit instructions for the leading candidate expression for this +/// LSRUse (this is called "expanding"). Value *LSRInstance::Expand(const LSRFixup &LF, const Formula &F, BasicBlock::iterator IP, @@ -3017,16 +4231,16 @@ Value *LSRInstance::Expand(const LSRFixup &LF, // Determine an input position which will be dominated by the operands and // which will dominate the result. - IP = AdjustInsertPositionForExpand(IP, LF, LU); + IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); // Inform the Rewriter if we have a post-increment use, so that it can // perform an advantageous expansion. Rewriter.setPostInc(LF.PostIncLoops); // This is the type that the user actually needs. - const Type *OpTy = LF.OperandValToReplace->getType(); + Type *OpTy = LF.OperandValToReplace->getType(); // This will be the type that we'll initially expand to. - const Type *Ty = F.getType(); + Type *Ty = F.getType(); if (!Ty) // No type known; just expand directly to the ultimate type. Ty = OpTy; @@ -3034,7 +4248,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF, // Expand directly to the ultimate type if it's the right size. Ty = OpTy; // This is the type to do integer arithmetic in. - const Type *IntTy = SE.getEffectiveSCEVType(Ty); + Type *IntTy = SE.getEffectiveSCEVType(Ty); // Build up a list of operands to add together to form the full base. SmallVector Ops; @@ -3111,7 +4325,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF, // The other interesting way of "folding" with an ICmpZero is to use a // negated immediate. if (!ICmpScaledV) - ICmpScaledV = ConstantInt::get(IntTy, -Offset); + ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); else { Ops.push_back(SE.getUnknown(ICmpScaledV)); ICmpScaledV = ConstantInt::get(IntTy, Offset); @@ -3123,6 +4337,14 @@ Value *LSRInstance::Expand(const LSRFixup &LF, } } + // Expand the unfolded offset portion. + int64_t UnfoldedOffset = F.UnfoldedOffset; + if (UnfoldedOffset != 0) { + // Just add the immediate values. + Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, + UnfoldedOffset))); + } + // Emit instructions summing all the operands. const SCEV *FullS = Ops.empty() ? SE.getConstant(IntTy, 0) : @@ -3186,21 +4408,33 @@ void LSRInstance::RewriteForPHI(PHINode *PN, // is the canonical backedge for this loop, which complicates post-inc // users. if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && - !isa(BB->getTerminator()) && - (PN->getParent() != L->getHeader() || !L->contains(BB))) { - // Split the critical edge. 
- BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P); - - // If PN is outside of the loop and BB is in the loop, we want to - // move the block to be immediately before the PHI block, not - // immediately after BB. - if (L->contains(BB) && !L->contains(PN)) - NewBB->moveBefore(PN->getParent()); - - // Splitting the edge can reduce the number of PHI entries we have. - e = PN->getNumIncomingValues(); - BB = NewBB; - i = PN->getBasicBlockIndex(BB); + !isa(BB->getTerminator())) { + BasicBlock *Parent = PN->getParent(); + Loop *PNLoop = LI.getLoopFor(Parent); + if (!PNLoop || Parent != PNLoop->getHeader()) { + // Split the critical edge. + BasicBlock *NewBB = 0; + if (!Parent->isLandingPad()) { + NewBB = SplitCriticalEdge(BB, Parent, P, + /*MergeIdenticalEdges=*/true, + /*DontDeleteUselessPhis=*/true); + } else { + SmallVector NewBBs; + SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs); + NewBB = NewBBs[0]; + } + + // If PN is outside of the loop and BB is in the loop, we want to + // move the block to be immediately before the PHI block, not + // immediately after BB. + if (L->contains(BB) && !L->contains(PN)) + NewBB->moveBefore(PN->getParent()); + + // Splitting the edge can reduce the number of PHI entries we have. + e = PN->getNumIncomingValues(); + BB = NewBB; + i = PN->getBasicBlockIndex(BB); + } } std::pair::iterator, bool> Pair = @@ -3211,7 +4445,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN, Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts); // If this is reuse-by-noop-cast, insert the noop cast. - const Type *OpTy = LF.OperandValToReplace->getType(); + Type *OpTy = LF.OperandValToReplace->getType(); if (FullV->getType() != OpTy) FullV = CastInst::Create(CastInst::getCastOpcode(FullV, false, @@ -3241,7 +4475,7 @@ void LSRInstance::Rewrite(const LSRFixup &LF, Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts); // If this is reuse-by-noop-cast, insert the noop cast. - const Type *OpTy = LF.OperandValToReplace->getType(); + Type *OpTy = LF.OperandValToReplace->getType(); if (FullV->getType() != OpTy) { Instruction *Cast = CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), @@ -3263,6 +4497,8 @@ void LSRInstance::Rewrite(const LSRFixup &LF, DeadInsts.push_back(LF.OperandValToReplace); } +/// ImplementSolution - Rewrite all the fixup locations with new values, +/// following the chosen solution. void LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, Pass *P) { @@ -3270,19 +4506,36 @@ LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, // we can remove them after we are done working. SmallVector DeadInsts; - SCEVExpander Rewriter(SE); + SCEVExpander Rewriter(SE, "lsr"); +#ifndef NDEBUG + Rewriter.setDebugType(DEBUG_TYPE); +#endif Rewriter.disableCanonicalMode(); + Rewriter.enableLSRMode(); Rewriter.setIVIncInsertPos(L, IVIncInsertPos); + // Mark phi nodes that terminate chains so the expander tries to reuse them. + for (SmallVectorImpl::const_iterator ChainI = IVChainVec.begin(), + ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { + if (PHINode *PN = dyn_cast(ChainI->tailUserInst())) + Rewriter.setChainedPhi(PN); + } + // Expand the new value definitions and update the users. 
- for (size_t i = 0, e = Fixups.size(); i != e; ++i) { - size_t LUIdx = Fixups[i].LUIdx; + for (SmallVectorImpl::const_iterator I = Fixups.begin(), + E = Fixups.end(); I != E; ++I) { + const LSRFixup &Fixup = *I; - Rewrite(Fixups[i], *Solution[LUIdx], Rewriter, DeadInsts, P); + Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P); Changed = true; } + for (SmallVectorImpl::const_iterator ChainI = IVChainVec.begin(), + ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { + GenerateIVChain(*ChainI, Rewriter, DeadInsts); + Changed = true; + } // Clean up after ourselves. This must be done before deleting any // instructions. Rewriter.clear(); @@ -3298,26 +4551,64 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) TLI(tli), L(l), Changed(false), IVIncInsertPos(0) { // If LoopSimplify form is not available, stay out of trouble. - if (!L->isLoopSimplifyForm()) return; + if (!L->isLoopSimplifyForm()) + return; // If there's no interesting work to be done, bail early. if (IU.empty()) return; + // If there's too much analysis to be done, bail early. We won't be able to + // model the problem anyway. + unsigned NumUsers = 0; + for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { + if (++NumUsers > MaxIVUsers) { + DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L + << "\n"); + return; + } + } + +#ifndef NDEBUG + // All dominating loops must have preheaders, or SCEVExpander may not be able + // to materialize an AddRecExpr whose Start is an outer AddRecExpr. + // + // IVUsers analysis should only create users that are dominated by simple loop + // headers. Since this loop should dominate all of its users, its user list + // should be empty if this loop itself is not within a simple loop nest. + for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); + Rung; Rung = Rung->getIDom()) { + BasicBlock *BB = Rung->getBlock(); + const Loop *DomLoop = LI.getLoopFor(BB); + if (DomLoop && DomLoop->getHeader() == BB) { + assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); + } + } +#endif // DEBUG + DEBUG(dbgs() << "\nLSR on loop "; WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false); dbgs() << ":\n"); - /// OptimizeShadowIV - If IV is used in a int-to-float cast - /// inside the loop then try to eliminate the cast operation. + // First, perform some low-level loop optimizations. OptimizeShadowIV(); + OptimizeLoopTermCond(); - // Change loop terminating condition to use the postinc iv when possible. - Changed |= OptimizeLoopTermCond(); + // If loop preparation eliminates all interesting IV users, bail. + if (IU.empty()) return; + // Skip nested loops until we can model them better with formulae. + if (!L->empty()) { + DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); + return; + } + + // Start collecting data and preparing for the solver. + CollectChains(); CollectInterestingTypesAndFactors(); CollectFixupsAndInitialFormulae(); CollectLoopInvariantFixupsAndFormulae(); + assert(!Uses.empty() && "IVUsers reported at least one use"); DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; print_uses(dbgs())); @@ -3325,22 +4616,20 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) // to formulate the values needed for the uses. 
GenerateAllReuseFormulae(); - DEBUG(dbgs() << "\n" - "After generating reuse formulae:\n"; - print_uses(dbgs())); - FilterOutUndesirableDedicatedRegisters(); NarrowSearchSpaceUsingHeuristics(); SmallVector Solution; Solve(Solution); - assert(Solution.size() == Uses.size() && "Malformed solution!"); // Release memory that is no longer needed. Factors.clear(); Types.clear(); RegUses.clear(); + if (Solution.empty()) + return; + #ifndef NDEBUG // Formulae should be legal. for (SmallVectorImpl::const_iterator I = Uses.begin(), @@ -3371,7 +4660,7 @@ void LSRInstance::print_factors_and_types(raw_ostream &OS) const { OS << '*' << *I; } - for (SmallSetVector::const_iterator + for (SmallSetVector::const_iterator I = Types.begin(), E = Types.end(); I != E; ++I) { if (!First) OS << ", "; First = false; @@ -3384,9 +4673,8 @@ void LSRInstance::print_fixups(raw_ostream &OS) const { OS << "LSR is examining the following fixup sites:\n"; for (SmallVectorImpl::const_iterator I = Fixups.begin(), E = Fixups.end(); I != E; ++I) { - const LSRFixup &LF = *I; dbgs() << " "; - LF.print(OS); + I->print(OS); OS << '\n'; } } @@ -3437,21 +4725,30 @@ private: } char LoopStrengthReduce::ID = 0; -static RegisterPass -X("loop-reduce", "Loop Strength Reduction"); +INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", + "Loop Strength Reduction", false, false) +INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(IVUsers) +INITIALIZE_PASS_DEPENDENCY(LoopInfo) +INITIALIZE_PASS_DEPENDENCY(LoopSimplify) +INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce", + "Loop Strength Reduction", false, false) + Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) { return new LoopStrengthReduce(TLI); } LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli) - : LoopPass(&ID), TLI(tli) {} + : LoopPass(ID), TLI(tli) { + initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); + } void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { // We split critical edges, so we change the CFG. However, we do update // many analyses if they are around. AU.addPreservedID(LoopSimplifyID); - AU.addPreserved("domfrontier"); AU.addRequired(); AU.addPreserved(); @@ -3460,6 +4757,9 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); + // Requiring LoopSimplify a second time here prevents IVUsers from running + // twice, since LoopSimplify was invalidated by running ScalarEvolution. + AU.addRequiredID(LoopSimplifyID); AU.addRequired(); AU.addPreserved(); } @@ -3470,9 +4770,21 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { // Run the main LSR transformation. Changed |= LSRInstance(TLI, L, this).getChanged(); - // At this point, it is worth checking to see if any recurrence PHIs are also - // dead, so that we can remove them as well. + // Remove any extra phis created by processing inner loops. Changed |= DeleteDeadPHIs(L->getHeader()); - + if (EnablePhiElim) { + SmallVector DeadInsts; + SCEVExpander Rewriter(getAnalysis(), "lsr"); +#ifndef NDEBUG + Rewriter.setDebugType(DEBUG_TYPE); +#endif + unsigned numFolded = Rewriter. + replaceCongruentIVs(L, &getAnalysis(), DeadInsts, TLI); + if (numFolded) { + Changed = true; + DeleteTriviallyDeadInstructions(DeadInsts); + DeleteDeadPHIs(L->getHeader()); + } + } return Changed; }
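// A hedged usage sketch (not part of the patch): scheduling the pass from C++
// with the legacy PassManager of this LLVM version.  The Module and the
// TargetLowering pointer are assumed to come from the embedding tool, and
// "llvm/PassManager.h" plus "llvm/Transforms/Scalar.h" are assumed to be
// included; TLI may be null, in which case target-dependent folding checks
// are simply skipped.
static void runLSRSketch(Module &M, const TargetLowering *TLI) {
  PassManager PM;
  PM.add(createLoopStrengthReducePass(TLI));
  PM.run(M);
}
// When experimenting from the command line, the EnablePhiElim and (in
// +Asserts builds) StressIVChain cl::opt settings referenced above can be
// flipped through opt to isolate the phi-elimination and IV-chain behavior.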