X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=lib%2FTransforms%2FScalar%2FIndVarSimplify.cpp;h=27e5ec8cf40955fd9d7ffffd74f50018ed38e5e5;hb=e35ac41a3a965d18e0ada76cafa54b5b0814982a;hp=dee3d38d72af3577bb9e720c80a0c5e7364ad603;hpb=5614769d557600fda3cf73b481581fe32fbff258;p=oota-llvm.git diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index dee3d38d72a..27e5ec8cf40 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -11,17 +11,6 @@ // computations derived from them) into simpler forms suitable for subsequent // analysis and transformation. // -// This transformation makes the following changes to each loop with an -// identifiable induction variable: -// 1. All loops are transformed to have a SINGLE canonical induction variable -// which starts at zero and steps by one. -// 2. The canonical induction variable is guaranteed to be the first PHI node -// in the loop header block. -// 3. The canonical induction variable is guaranteed to be in a wide enough -// type so that IV expressions need not be (directly) zero-extended or -// sign-extended. -// 4. Any pointer arithmetic recurrences are raised to use array subscripts. -// // If the trip count of a loop is computable, this pass also makes the following // changes: // 1. The exit condition for the loop is canonicalized to compare the @@ -33,90 +22,85 @@ // purpose of the loop is to compute the exit value of some derived // expression, this transformation will make the loop dead. // -// This transformation should be followed by strength reduction after all of the -// desired loop transformations have been performed. -// //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "indvars" #include "llvm/Transforms/Scalar.h" -#include "llvm/BasicBlock.h" -#include "llvm/Constants.h" -#include "llvm/Instructions.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Type.h" -#include "llvm/Analysis/Dominators.h" -#include "llvm/Analysis/IVUsers.h" -#include "llvm/Analysis/ScalarEvolutionExpander.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Support/CFG.h" +#include "llvm/Analysis/ScalarEvolutionExpander.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Type.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Target/TargetData.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/ADT/STLExtras.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/SimplifyIndVar.h" using namespace llvm; -STATISTIC(NumRemoved , "Number of aux indvars removed"); +#define DEBUG_TYPE "indvars" + STATISTIC(NumWidened , "Number of indvars widened"); -STATISTIC(NumInserted , "Number of canonical indvars added"); STATISTIC(NumReplaced , "Number of exit values replaced"); STATISTIC(NumLFTR , "Number of loop exit 
tests replaced"); -STATISTIC(NumElimIdentity, "Number of IV identities eliminated"); STATISTIC(NumElimExt , "Number of IV sign/zero extends eliminated"); -STATISTIC(NumElimRem , "Number of IV remainder operations eliminated"); -STATISTIC(NumElimCmp , "Number of IV comparisons eliminated"); STATISTIC(NumElimIV , "Number of congruent IVs eliminated"); -static cl::opt DisableIVRewrite( - "disable-iv-rewrite", cl::Hidden, - cl::desc("Disable canonical induction variable rewriting")); +// Trip count verification can be enabled by default under NDEBUG if we +// implement a strong expression equivalence checker in SCEV. Until then, we +// use the verify-indvars flag, which may assert in some cases. +static cl::opt VerifyIndvars( + "verify-indvars", cl::Hidden, + cl::desc("Verify the ScalarEvolution result after running indvars")); + +static cl::opt ReduceLiveIVs("liv-reduce", cl::Hidden, + cl::desc("Reduce live induction variables.")); namespace { class IndVarSimplify : public LoopPass { - IVUsers *IU; LoopInfo *LI; ScalarEvolution *SE; DominatorTree *DT; - TargetData *TD; + const DataLayout *DL; + TargetLibraryInfo *TLI; SmallVector DeadInsts; bool Changed; public: static char ID; // Pass identification, replacement for typeid - IndVarSimplify() : LoopPass(ID), IU(0), LI(0), SE(0), DT(0), TD(0), - Changed(false) { + IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr), + DL(nullptr), Changed(false) { initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry()); } - virtual bool runOnLoop(Loop *L, LPPassManager &LPM); + bool runOnLoop(Loop *L, LPPassManager &LPM) override; - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); - if (!DisableIVRewrite) - AU.addRequired(); AU.addPreserved(); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); - if (!DisableIVRewrite) - AU.addPreserved(); AU.setPreservesCFG(); } private: - virtual void releaseMemory() { + void releaseMemory() override { DeadInsts.clear(); } @@ -125,24 +109,12 @@ namespace { void HandleFloatingPointIV(Loop *L, PHINode *PH); void RewriteNonIntegerIVs(Loop *L); - void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter); + void SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM); - void SimplifyIVUsers(SCEVExpander &Rewriter); - void SimplifyIVUsersNoRewrite(Loop *L, SCEVExpander &Rewriter); - - bool EliminateIVUser(Instruction *UseInst, Instruction *IVOperand); - void EliminateIVComparison(ICmpInst *ICmp, Value *IVOperand); - void EliminateIVRemainder(BinaryOperator *Rem, - Value *IVOperand, - bool IsSigned); - - void SimplifyCongruentIVs(Loop *L); - - void RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter); + void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter); - ICmpInst *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, - PHINode *IndVar, - SCEVExpander &Rewriter); + Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, + PHINode *IndVar, SCEVExpander &Rewriter); void SinkUnusedInvariants(Loop *L); }; @@ -151,12 +123,11 @@ namespace { char IndVarSimplify::ID = 0; INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars", "Induction Variable Simplification", false, false) -INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfo) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) 
INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) -INITIALIZE_PASS_DEPENDENCY(IVUsers) INITIALIZE_PASS_END(IndVarSimplify, "indvars", "Induction Variable Simplification", false, false) @@ -198,6 +169,11 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { // base of a recurrence. This handles the case in which SCEV expansion // converts a pointer type recurrence into a nonrecurrent pointer base // indexed by an integer recurrence. + + // If the GEP base pointer is a vector of pointers, abort. + if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy()) + return false; + const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr)); const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr)); if (FromBase == ToBase) @@ -211,6 +187,36 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { return true; } +/// Determine the insertion point for this user. By default, insert immediately +/// before the user. SCEVExpander or LICM will hoist loop invariants out of the +/// loop. For PHI nodes, there may be multiple uses, so compute the nearest +/// common dominator for the incoming blocks. +static Instruction *getInsertPointForUses(Instruction *User, Value *Def, + DominatorTree *DT) { + PHINode *PHI = dyn_cast(User); + if (!PHI) + return User; + + Instruction *InsertPt = nullptr; + for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) { + if (PHI->getIncomingValue(i) != Def) + continue; + + BasicBlock *InsertBB = PHI->getIncomingBlock(i); + if (!InsertPt) { + InsertPt = InsertBB->getTerminator(); + continue; + } + InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB); + InsertPt = InsertBB->getTerminator(); + } + assert(InsertPt && "Missing phi operand"); + assert((!isa(Def) || + DT->dominates(cast(Def), InsertPt)) && + "def does not dominate all uses"); + return InsertPt; +} + //===----------------------------------------------------------------------===// // RewriteNonIntegerIVs and helpers. Prefer integer IVs. //===----------------------------------------------------------------------===// @@ -218,8 +224,6 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { /// ConvertToSInt - Convert APF to an integer, if possible. static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) { bool isExact = false; - if (&APF.getSemantics() == &APFloat::PPCDoubleDouble) - return false; // See if we can convert this to an int64_t uint64_t UIntVal; if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero, @@ -254,34 +258,34 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // an add or increment value can not be represented by an integer. BinaryOperator *Incr = dyn_cast(PN->getIncomingValue(BackEdge)); - if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return; + if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return; // If this is not an add of the PHI with a constantfp, or if the constant fp // is not an integer, bail out. ConstantFP *IncValueVal = dyn_cast(Incr->getOperand(1)); int64_t IncValue; - if (IncValueVal == 0 || Incr->getOperand(0) != PN || + if (IncValueVal == nullptr || Incr->getOperand(0) != PN || !ConvertToSInt(IncValueVal->getValueAPF(), IncValue)) return; // Check Incr uses. One user is PN and the other user is an exit condition // used by the conditional terminator. 
- Value::use_iterator IncrUse = Incr->use_begin(); + Value::user_iterator IncrUse = Incr->user_begin(); Instruction *U1 = cast(*IncrUse++); - if (IncrUse == Incr->use_end()) return; + if (IncrUse == Incr->user_end()) return; Instruction *U2 = cast(*IncrUse++); - if (IncrUse != Incr->use_end()) return; + if (IncrUse != Incr->user_end()) return; // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't // only used by a branch, we can't transform it. FCmpInst *Compare = dyn_cast(U1); if (!Compare) Compare = dyn_cast(U2); - if (Compare == 0 || !Compare->hasOneUse() || - !isa(Compare->use_back())) + if (!Compare || !Compare->hasOneUse() || + !isa(Compare->user_back())) return; - BranchInst *TheBr = cast(Compare->use_back()); + BranchInst *TheBr = cast(Compare->user_back()); // We need to verify that the branch actually controls the iteration count // of the loop. If not, the new IV can overflow and no one will notice. @@ -298,7 +302,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // transform it. ConstantFP *ExitValueVal = dyn_cast(Compare->getOperand(1)); int64_t ExitValue; - if (ExitValueVal == 0 || + if (ExitValueVal == nullptr || !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue)) return; @@ -337,14 +341,14 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // Positive and negative strides have different safety conditions. if (IncValue > 0) { // If we have a positive stride, we require the init to be less than the - // exit value and an equality or less than comparison. - if (InitValue >= ExitValue || - NewPred == CmpInst::ICMP_SGT || NewPred == CmpInst::ICMP_SGE) + // exit value. + if (InitValue >= ExitValue) return; uint32_t Range = uint32_t(ExitValue-InitValue); - if (NewPred == CmpInst::ICMP_SLE) { - // Normalize SLE -> SLT, check for infinite loop. + // Check for infinite loop, either: + // while (i <= Exit) or until (i > Exit) + if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) { if (++Range == 0) return; // Range overflows. } @@ -364,14 +368,14 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { } else { // If we have a negative stride, we require the init to be greater than the - // exit value and an equality or greater than comparison. - if (InitValue >= ExitValue || - NewPred == CmpInst::ICMP_SLT || NewPred == CmpInst::ICMP_SLE) + // exit value. + if (InitValue <= ExitValue) return; uint32_t Range = uint32_t(InitValue-ExitValue); - if (NewPred == CmpInst::ICMP_SGE) { - // Normalize SGE -> SGT, check for infinite loop. + // Check for infinite loop, either: + // while (i >= Exit) or until (i < Exit) + if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) { if (++Range == 0) return; // Range overflows. } @@ -390,7 +394,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { return; } - const IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext()); + IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext()); // Insert new integer induction variable. PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN); @@ -414,11 +418,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // new comparison. NewCompare->takeName(Compare); Compare->replaceAllUsesWith(NewCompare); - RecursivelyDeleteTriviallyDeadInstructions(Compare); + RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI); // Delete the old floating point increment. 
Incr->replaceAllUsesWith(UndefValue::get(Incr->getType())); - RecursivelyDeleteTriviallyDeadInstructions(Incr); + RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI); // If the FP induction variable still has uses, this is because something else // in the loop uses its value. In order to canonicalize the induction @@ -429,14 +433,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // platforms. if (WeakPH) { Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv", - PN->getParent()->getFirstNonPHI()); + PN->getParent()->getFirstInsertionPt()); PN->replaceAllUsesWith(Conv); - RecursivelyDeleteTriviallyDeadInstructions(PN); + RecursivelyDeleteTriviallyDeadInstructions(PN, TLI); } - - // Add a new IVUsers entry for the newly-created integer PHI. - if (IU) - IU->AddUsersIfInteresting(NewPHI); + Changed = true; } void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) { @@ -497,6 +498,21 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { unsigned NumPreds = PN->getNumIncomingValues(); + // We would like to be able to RAUW single-incoming value PHI nodes. We + // have to be certain this is safe even when this is an LCSSA PHI node. + // While the computed exit value is no longer varying in *this* loop, the + // exit block may be an exit block for an outer containing loop as well, + // the exit value may be varying in the outer loop, and thus it may still + // require an LCSSA PHI node. The safe case is when this is + // single-predecessor PHI node (LCSSA) and the exit block containing it is + // part of the enclosing loop, or this is the outer most loop of the nest. + // In either case the exit value could (at most) be varying in the same + // loop body as the phi node itself. Thus if it is in turn used outside of + // an enclosing loop it will only be via a separate LCSSA node. + bool LCSSASafePhiForRAUW = + NumPreds == 1 && + (!L->getParentLoop() || L->getParentLoop() == LI->getLoopFor(ExitBB)); + // Iterate over all of the PHI nodes. BasicBlock::iterator BBI = ExitBB->begin(); while ((PN = dyn_cast(BBI++))) { @@ -535,9 +551,49 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { // and varies predictably *inside* the loop. Evaluate the value it // contains when the loop exits, if possible. const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); - if (!SE->isLoopInvariant(ExitValue, L)) + if (!SE->isLoopInvariant(ExitValue, L) || + !isSafeToExpand(ExitValue, *SE)) continue; + // Computing the value outside of the loop brings no benefit if : + // - it is definitely used inside the loop in a way which can not be + // optimized away. + // - no use outside of the loop can take advantage of hoisting the + // computation out of the loop + if (ExitValue->getSCEVType()>=scMulExpr) { + unsigned NumHardInternalUses = 0; + unsigned NumSoftExternalUses = 0; + unsigned NumUses = 0; + for (auto IB = Inst->user_begin(), IE = Inst->user_end(); + IB != IE && NumUses <= 6; ++IB) { + Instruction *UseInstr = cast(*IB); + unsigned Opc = UseInstr->getOpcode(); + NumUses++; + if (L->contains(UseInstr)) { + if (Opc == Instruction::Call || Opc == Instruction::Ret) + NumHardInternalUses++; + } else { + if (Opc == Instruction::PHI) { + // Do not count the Phi as a use. LCSSA may have inserted + // plenty of trivial ones. 
+ NumUses--; + for (auto PB = UseInstr->user_begin(), + PE = UseInstr->user_end(); + PB != PE && NumUses <= 6; ++PB, ++NumUses) { + unsigned PhiOpc = cast(*PB)->getOpcode(); + if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret) + NumSoftExternalUses++; + } + continue; + } + if (Opc != Instruction::Call && Opc != Instruction::Ret) + NumSoftExternalUses++; + } + } + if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses) + continue; + } + Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst); DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n' @@ -552,20 +608,23 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { PN->setIncomingValue(i, ExitVal); - // If this instruction is dead now, delete it. - RecursivelyDeleteTriviallyDeadInstructions(Inst); + // If this instruction is dead now, delete it. Don't do it now to avoid + // invalidating iterators. + if (isInstructionTriviallyDead(Inst, TLI)) + DeadInsts.push_back(Inst); - if (NumPreds == 1) { - // Completely replace a single-pred PHI. This is safe, because the - // NewVal won't be variant in the loop, so we don't need an LCSSA phi - // node anymore. + // If we determined that this PHI is safe to replace even if an LCSSA + // PHI, do so. + if (LCSSASafePhiForRAUW) { PN->replaceAllUsesWith(ExitVal); - RecursivelyDeleteTriviallyDeadInstructions(PN); + PN->eraseFromParent(); } } - if (NumPreds != 1) { - // Clone the PHI and delete the original one. This lets IVUsers and - // any other maps purge the original user from their records. + + // If we were unable to completely replace the PHI node, clone the PHI + // and delete the original one. This lets IVUsers and any other maps + // purge the original user from their records. + if (!LCSSASafePhiForRAUW) { PHINode *NewPN = cast(PN->clone()); NewPN->takeName(PN); NewPN->insertBefore(PN); @@ -580,164 +639,6 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { Rewriter.clearInsertPoint(); } -//===----------------------------------------------------------------------===// -// Rewrite IV users based on a canonical IV. -// To be replaced by -disable-iv-rewrite. -//===----------------------------------------------------------------------===// - -/// SimplifyIVUsers - Iteratively perform simplification on IVUsers within this -/// loop. IVUsers is treated as a worklist. Each successive simplification may -/// push more users which may themselves be candidates for simplification. -/// -/// This is the old approach to IV simplification to be replaced by -/// SimplifyIVUsersNoRewrite. -/// -void IndVarSimplify::SimplifyIVUsers(SCEVExpander &Rewriter) { - // Each round of simplification involves a round of eliminating operations - // followed by a round of widening IVs. A single IVUsers worklist is used - // across all rounds. The inner loop advances the user. If widening exposes - // more uses, then another pass through the outer loop is triggered. 
- for (IVUsers::iterator I = IU->begin(); I != IU->end(); ++I) { - Instruction *UseInst = I->getUser(); - Value *IVOperand = I->getOperandValToReplace(); - - if (ICmpInst *ICmp = dyn_cast(UseInst)) { - EliminateIVComparison(ICmp, IVOperand); - continue; - } - if (BinaryOperator *Rem = dyn_cast(UseInst)) { - bool IsSigned = Rem->getOpcode() == Instruction::SRem; - if (IsSigned || Rem->getOpcode() == Instruction::URem) { - EliminateIVRemainder(Rem, IVOperand, IsSigned); - continue; - } - } - } -} - -// FIXME: It is an extremely bad idea to indvar substitute anything more -// complex than affine induction variables. Doing so will put expensive -// polynomial evaluations inside of the loop, and the str reduction pass -// currently can only reduce affine polynomials. For now just disable -// indvar subst on anything more complex than an affine addrec, unless -// it can be expanded to a trivial value. -static bool isSafe(const SCEV *S, const Loop *L, ScalarEvolution *SE) { - // Loop-invariant values are safe. - if (SE->isLoopInvariant(S, L)) return true; - - // Affine addrecs are safe. Non-affine are not, because LSR doesn't know how - // to transform them into efficient code. - if (const SCEVAddRecExpr *AR = dyn_cast(S)) - return AR->isAffine(); - - // An add is safe it all its operands are safe. - if (const SCEVCommutativeExpr *Commutative = dyn_cast(S)) { - for (SCEVCommutativeExpr::op_iterator I = Commutative->op_begin(), - E = Commutative->op_end(); I != E; ++I) - if (!isSafe(*I, L, SE)) return false; - return true; - } - - // A cast is safe if its operand is. - if (const SCEVCastExpr *C = dyn_cast(S)) - return isSafe(C->getOperand(), L, SE); - - // A udiv is safe if its operands are. - if (const SCEVUDivExpr *UD = dyn_cast(S)) - return isSafe(UD->getLHS(), L, SE) && - isSafe(UD->getRHS(), L, SE); - - // SCEVUnknown is always safe. - if (isa(S)) - return true; - - // Nothing else is safe. - return false; -} - -void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) { - // Rewrite all induction variable expressions in terms of the canonical - // induction variable. - // - // If there were induction variables of other sizes or offsets, manually - // add the offsets to the primary induction variable and cast, avoiding - // the need for the code evaluation methods to insert induction variables - // of different sizes. - for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) { - Value *Op = UI->getOperandValToReplace(); - const Type *UseTy = Op->getType(); - Instruction *User = UI->getUser(); - - // Compute the final addrec to expand into code. - const SCEV *AR = IU->getReplacementExpr(*UI); - - // Evaluate the expression out of the loop, if possible. - if (!L->contains(UI->getUser())) { - const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop()); - if (SE->isLoopInvariant(ExitVal, L)) - AR = ExitVal; - } - - // FIXME: It is an extremely bad idea to indvar substitute anything more - // complex than affine induction variables. Doing so will put expensive - // polynomial evaluations inside of the loop, and the str reduction pass - // currently can only reduce affine polynomials. For now just disable - // indvar subst on anything more complex than an affine addrec, unless - // it can be expanded to a trivial value. - if (!isSafe(AR, L, SE)) - continue; - - // Determine the insertion point for this user. By default, insert - // immediately before the user. The SCEVExpander class will automatically - // hoist loop invariants out of the loop. 
For PHI nodes, there may be - // multiple uses, so compute the nearest common dominator for the - // incoming blocks. - Instruction *InsertPt = User; - if (PHINode *PHI = dyn_cast(InsertPt)) - for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) - if (PHI->getIncomingValue(i) == Op) { - if (InsertPt == User) - InsertPt = PHI->getIncomingBlock(i)->getTerminator(); - else - InsertPt = - DT->findNearestCommonDominator(InsertPt->getParent(), - PHI->getIncomingBlock(i)) - ->getTerminator(); - } - - // Now expand it into actual Instructions and patch it into place. - Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt); - - DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n' - << " into = " << *NewVal << "\n"); - - if (!isValidRewrite(Op, NewVal)) { - DeadInsts.push_back(NewVal); - continue; - } - // Inform ScalarEvolution that this value is changing. The change doesn't - // affect its value, but it does potentially affect which use lists the - // value will be on after the replacement, which affects ScalarEvolution's - // ability to walk use lists and drop dangling pointers when a value is - // deleted. - SE->forgetValue(User); - - // Patch the new value into place. - if (Op->hasName()) - NewVal->takeName(Op); - if (Instruction *NewValI = dyn_cast(NewVal)) - NewValI->setDebugLoc(User->getDebugLoc()); - User->replaceUsesOfWith(Op, NewVal); - UI->setOperandValToReplace(NewVal); - - ++NumRemoved; - Changed = true; - - // The old value may be dead now. - DeadInsts.push_back(Op); - } -} - //===----------------------------------------------------------------------===// // IV Widening - Extend the width of an IV to cover its widest uses. //===----------------------------------------------------------------------===// @@ -747,21 +648,27 @@ namespace { // extend operations. This information is recorded by CollectExtend and // provides the input to WidenIV. struct WideIVInfo { - const Type *WidestNativeType; // Widest integer type created [sz]ext - bool IsSigned; // Was an sext user seen before a zext? + PHINode *NarrowIV; + Type *WidestNativeType; // Widest integer type created [sz]ext + bool IsSigned; // Was an sext user seen before a zext? - WideIVInfo() : WidestNativeType(0), IsSigned(false) {} + WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr), + IsSigned(false) {} }; } -/// CollectExtend - Update information about the induction variable that is +/// visitCast - Update information about the induction variable that is /// extended by this sign or zero extend operation. This is used to determine /// the final width of the IV before actually widening it. -static void CollectExtend(CastInst *Cast, bool IsSigned, WideIVInfo &WI, - ScalarEvolution *SE, const TargetData *TD) { - const Type *Ty = Cast->getType(); +static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE, + const DataLayout *DL) { + bool IsSigned = Cast->getOpcode() == Instruction::SExt; + if (!IsSigned && Cast->getOpcode() != Instruction::ZExt) + return; + + Type *Ty = Cast->getType(); uint64_t Width = SE->getTypeSizeInBits(Ty); - if (TD && !TD->isLegalInteger(Width)) + if (DL && !DL->isLegalInteger(Width)) return; if (!WI.WidestNativeType) { @@ -779,6 +686,21 @@ static void CollectExtend(CastInst *Cast, bool IsSigned, WideIVInfo &WI, } namespace { + +/// NarrowIVDefUse - Record a link in the Narrow IV def-use chain along with the +/// WideIV that computes the same value as the Narrow IV def. This avoids +/// caching Use* pointers. 
+struct NarrowIVDefUse { + Instruction *NarrowDef; + Instruction *NarrowUse; + Instruction *WideDef; + + NarrowIVDefUse(): NarrowDef(nullptr), NarrowUse(nullptr), WideDef(nullptr) {} + + NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD): + NarrowDef(ND), NarrowUse(NU), WideDef(WD) {} +}; + /// WidenIV - The goal of this transform is to remove sign and zero extends /// without creating any new induction variables. To do this, it creates a new /// phi of the wider type and redirects all users, either removing extends or @@ -787,7 +709,7 @@ namespace { class WidenIV { // Parameters PHINode *OrigPhi; - const Type *WideType; + Type *WideType; bool IsSigned; // Context @@ -803,22 +725,22 @@ class WidenIV { SmallVectorImpl &DeadInsts; SmallPtrSet Widened; - SmallVector, 8> NarrowIVUsers; + SmallVector NarrowIVUsers; public: - WidenIV(PHINode *PN, const WideIVInfo &WI, LoopInfo *LInfo, + WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv, DominatorTree *DTree, SmallVectorImpl &DI) : - OrigPhi(PN), + OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), IsSigned(WI.IsSigned), LI(LInfo), L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree), - WidePhi(0), - WideInc(0), - WideIncExpr(0), + WidePhi(nullptr), + WideInc(nullptr), + WideIncExpr(nullptr), DeadInsts(DI) { assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV"); } @@ -826,21 +748,45 @@ public: PHINode *CreateWideIV(SCEVExpander &Rewriter); protected: - Instruction *CloneIVUser(Instruction *NarrowUse, - Instruction *NarrowDef, - Instruction *WideDef); + Value *getExtend(Value *NarrowOper, Type *WideType, bool IsSigned, + Instruction *Use); + + Instruction *CloneIVUser(NarrowIVDefUse DU); const SCEVAddRecExpr *GetWideRecurrence(Instruction *NarrowUse); - Instruction *WidenIVUse(Use &NarrowDefUse, Instruction *NarrowDef, - Instruction *WideDef); + const SCEVAddRecExpr* GetExtendedOperandRecurrence(NarrowIVDefUse DU); + + const SCEV *GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, + unsigned OpCode) const; + + Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter); void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef); }; } // anonymous namespace -static Value *getExtend( Value *NarrowOper, const Type *WideType, - bool IsSigned, IRBuilder<> &Builder) { +/// isLoopInvariant - Perform a quick domtree based check for loop invariance +/// assuming that V is used within the loop. LoopInfo::isLoopInvariant() seems +/// gratuitous for this purpose. +static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) { + Instruction *Inst = dyn_cast(V); + if (!Inst) + return true; + + return DT->properlyDominates(Inst->getParent(), L->getHeader()); +} + +Value *WidenIV::getExtend(Value *NarrowOper, Type *WideType, bool IsSigned, + Instruction *Use) { + // Set the debug location and conservative insertion point. + IRBuilder<> Builder(Use); + // Hoist the insertion point into loop preheaders as far as possible. + for (const Loop *L = LI->getLoopFor(Use->getParent()); + L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT); + L = L->getParentLoop()) + Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator()); + return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) : Builder.CreateZExt(NarrowOper, WideType); } @@ -848,13 +794,11 @@ static Value *getExtend( Value *NarrowOper, const Type *WideType, /// CloneIVUser - Instantiate a wide operation to replace a narrow /// operation. 
This only needs to handle operations that can evaluation to /// SCEVAddRec. It can safely return 0 for any operation we decide not to clone. -Instruction *WidenIV::CloneIVUser(Instruction *NarrowUse, - Instruction *NarrowDef, - Instruction *WideDef) { - unsigned Opcode = NarrowUse->getOpcode(); +Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) { + unsigned Opcode = DU.NarrowUse->getOpcode(); switch (Opcode) { default: - return 0; + return nullptr; case Instruction::Add: case Instruction::Mul: case Instruction::UDiv: @@ -865,24 +809,23 @@ Instruction *WidenIV::CloneIVUser(Instruction *NarrowUse, case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: - DEBUG(dbgs() << "Cloning IVUser: " << *NarrowUse << "\n"); - - IRBuilder<> Builder(NarrowUse); + DEBUG(dbgs() << "Cloning IVUser: " << *DU.NarrowUse << "\n"); // Replace NarrowDef operands with WideDef. Otherwise, we don't know // anything about the narrow operand yet so must insert a [sz]ext. It is // probably loop invariant and will be folded or hoisted. If it actually // comes from a widened IV, it should be removed during a future call to // WidenIVUse. - Value *LHS = (NarrowUse->getOperand(0) == NarrowDef) ? WideDef : - getExtend(NarrowUse->getOperand(0), WideType, IsSigned, Builder); - Value *RHS = (NarrowUse->getOperand(1) == NarrowDef) ? WideDef : - getExtend(NarrowUse->getOperand(1), WideType, IsSigned, Builder); + Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef) ? DU.WideDef : + getExtend(DU.NarrowUse->getOperand(0), WideType, IsSigned, DU.NarrowUse); + Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef) ? DU.WideDef : + getExtend(DU.NarrowUse->getOperand(1), WideType, IsSigned, DU.NarrowUse); - BinaryOperator *NarrowBO = cast(NarrowUse); + BinaryOperator *NarrowBO = cast(DU.NarrowUse); BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS, NarrowBO->getName()); + IRBuilder<> Builder(DU.NarrowUse); Builder.Insert(WideBO); if (const OverflowingBinaryOperator *OBO = dyn_cast(NarrowBO)) { @@ -891,58 +834,77 @@ Instruction *WidenIV::CloneIVUser(Instruction *NarrowUse, } return WideBO; } - llvm_unreachable(0); } -/// HoistStep - Attempt to hoist an IV increment above a potential use. -/// -/// To successfully hoist, two criteria must be met: -/// - IncV operands dominate InsertPos and -/// - InsertPos dominates IncV -/// -/// Meeting the second condition means that we don't need to check all of IncV's -/// existing uses (it's moving up in the domtree). -/// -/// This does not yet recursively hoist the operands, although that would -/// not be difficult. 
-static bool HoistStep(Instruction *IncV, Instruction *InsertPos, - const DominatorTree *DT) -{ - if (DT->dominates(IncV, InsertPos)) - return true; +const SCEV *WidenIV::GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, + unsigned OpCode) const { + if (OpCode == Instruction::Add) + return SE->getAddExpr(LHS, RHS); + if (OpCode == Instruction::Sub) + return SE->getMinusSCEV(LHS, RHS); + if (OpCode == Instruction::Mul) + return SE->getMulExpr(LHS, RHS); - if (!DT->dominates(InsertPos->getParent(), IncV->getParent())) - return false; - - if (IncV->mayHaveSideEffects()) - return false; + llvm_unreachable("Unsupported opcode."); +} - // Attempt to hoist IncV - for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end(); - OI != OE; ++OI) { - Instruction *OInst = dyn_cast(OI); - if (OInst && !DT->dominates(OInst, InsertPos)) - return false; - } - IncV->moveBefore(InsertPos); - return true; +/// No-wrap operations can transfer sign extension of their result to their +/// operands. Generate the SCEV value for the widened operation without +/// actually modifying the IR yet. If the expression after extending the +/// operands is an AddRec for this loop, return it. +const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) { + + // Handle the common case of add + const unsigned OpCode = DU.NarrowUse->getOpcode(); + // Only Add/Sub/Mul instructions supported yet. + if (OpCode != Instruction::Add && OpCode != Instruction::Sub && + OpCode != Instruction::Mul) + return nullptr; + + // One operand (NarrowDef) has already been extended to WideDef. Now determine + // if extending the other will lead to a recurrence. + unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0; + assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU"); + + const SCEV *ExtendOperExpr = nullptr; + const OverflowingBinaryOperator *OBO = + cast(DU.NarrowUse); + if (IsSigned && OBO->hasNoSignedWrap()) + ExtendOperExpr = SE->getSignExtendExpr( + SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); + else if(!IsSigned && OBO->hasNoUnsignedWrap()) + ExtendOperExpr = SE->getZeroExtendExpr( + SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); + else + return nullptr; + + // When creating this SCEV expr, don't apply the current operations NSW or NUW + // flags. This instruction may be guarded by control flow that the no-wrap + // behavior depends on. Non-control-equivalent instructions can be mapped to + // the same SCEV expression, and it would be incorrect to transfer NSW/NUW + // semantics to those operations. + const SCEVAddRecExpr *AddRec = dyn_cast( + GetSCEVByOpCode(SE->getSCEV(DU.WideDef), ExtendOperExpr, OpCode)); + if (!AddRec || AddRec->getLoop() != L) + return nullptr; + return AddRec; } -// GetWideRecurrence - Is this instruction potentially interesting from IVUsers' -// perspective after widening it's type? In other words, can the extend be -// safely hoisted out of the loop with SCEV reducing the value to a recurrence -// on the same loop. If so, return the sign or zero extended -// recurrence. Otherwise return NULL. +/// GetWideRecurrence - Is this instruction potentially interesting from +/// IVUsers' perspective after widening it's type? In other words, can the +/// extend be safely hoisted out of the loop with SCEV reducing the value to a +/// recurrence on the same loop. If so, return the sign or zero extended +/// recurrence. Otherwise return NULL. 
const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) { if (!SE->isSCEVable(NarrowUse->getType())) - return 0; + return nullptr; const SCEV *NarrowExpr = SE->getSCEV(NarrowUse); if (SE->getTypeSizeInBits(NarrowExpr->getType()) >= SE->getTypeSizeInBits(WideType)) { // NarrowUse implicitly widens its operand. e.g. a gep with a narrow // index. So don't follow this use. - return 0; + return nullptr; } const SCEV *WideExpr = IsSigned ? @@ -950,48 +912,74 @@ const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) { SE->getZeroExtendExpr(NarrowExpr, WideType); const SCEVAddRecExpr *AddRec = dyn_cast(WideExpr); if (!AddRec || AddRec->getLoop() != L) - return 0; - + return nullptr; return AddRec; } +/// This IV user cannot be widen. Replace this use of the original narrow IV +/// with a truncation of the new wide IV to isolate and eliminate the narrow IV. +static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT) { + DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef + << " for user " << *DU.NarrowUse << "\n"); + IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT)); + Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType()); + DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc); +} + /// WidenIVUse - Determine whether an individual user of the narrow IV can be /// widened. If so, return the wide clone of the user. -Instruction *WidenIV::WidenIVUse(Use &NarrowDefUse, Instruction *NarrowDef, - Instruction *WideDef) { - Instruction *NarrowUse = cast(NarrowDefUse.getUser()); +Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { // Stop traversing the def-use chain at inner-loop phis or post-loop phis. - if (isa(NarrowUse) && LI->getLoopFor(NarrowUse->getParent()) != L) - return 0; - + if (PHINode *UsePhi = dyn_cast(DU.NarrowUse)) { + if (LI->getLoopFor(UsePhi->getParent()) != L) { + // For LCSSA phis, sink the truncate outside the loop. + // After SimplifyCFG most loop exit targets have a single predecessor. + // Otherwise fall back to a truncate within the loop. + if (UsePhi->getNumOperands() != 1) + truncateIVUse(DU, DT); + else { + PHINode *WidePhi = + PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide", + UsePhi); + WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0)); + IRBuilder<> Builder(WidePhi->getParent()->getFirstInsertionPt()); + Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType()); + UsePhi->replaceAllUsesWith(Trunc); + DeadInsts.push_back(UsePhi); + DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi + << " to " << *WidePhi << "\n"); + } + return nullptr; + } + } // Our raison d'etre! Eliminate sign and zero extension. - if (IsSigned ? isa(NarrowUse) : isa(NarrowUse)) { - Value *NewDef = WideDef; - if (NarrowUse->getType() != WideType) { - unsigned CastWidth = SE->getTypeSizeInBits(NarrowUse->getType()); + if (IsSigned ? isa(DU.NarrowUse) : isa(DU.NarrowUse)) { + Value *NewDef = DU.WideDef; + if (DU.NarrowUse->getType() != WideType) { + unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType()); unsigned IVWidth = SE->getTypeSizeInBits(WideType); if (CastWidth < IVWidth) { // The cast isn't as wide as the IV, so insert a Trunc. - IRBuilder<> Builder(NarrowDefUse); - NewDef = Builder.CreateTrunc(WideDef, NarrowUse->getType()); + IRBuilder<> Builder(DU.NarrowUse); + NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType()); } else { // A wider extend was hidden behind a narrower one. 
This may induce // another round of IV widening in which the intermediate IV becomes // dead. It should be very rare. DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi - << " not wide enough to subsume " << *NarrowUse << "\n"); - NarrowUse->replaceUsesOfWith(NarrowDef, WideDef); - NewDef = NarrowUse; + << " not wide enough to subsume " << *DU.NarrowUse << "\n"); + DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); + NewDef = DU.NarrowUse; } } - if (NewDef != NarrowUse) { - DEBUG(dbgs() << "INDVARS: eliminating " << *NarrowUse - << " replaced by " << *WideDef << "\n"); + if (NewDef != DU.NarrowUse) { + DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse + << " replaced by " << *DU.WideDef << "\n"); ++NumElimExt; - NarrowUse->replaceAllUsesWith(NewDef); - DeadInsts.push_back(NarrowUse); + DU.NarrowUse->replaceAllUsesWith(NewDef); + DeadInsts.push_back(DU.NarrowUse); } // Now that the extend is gone, we want to expose it's uses for potential // further simplification. We don't need to directly inform SimplifyIVUsers @@ -1000,35 +988,36 @@ Instruction *WidenIV::WidenIVUse(Use &NarrowDefUse, Instruction *NarrowDef, // push the uses of WideDef here. // No further widening is needed. The deceased [sz]ext had done it for us. - return 0; + return nullptr; } // Does this user itself evaluate to a recurrence after widening? - const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(NarrowUse); + const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(DU.NarrowUse); + if (!WideAddRec) { + WideAddRec = GetExtendedOperandRecurrence(DU); + } if (!WideAddRec) { // This user does not evaluate to a recurence after widening, so don't // follow it. Instead insert a Trunc to kill off the original use, // eventually isolating the original narrow IV so it can be removed. - IRBuilder<> Builder(NarrowDefUse); - Value *Trunc = Builder.CreateTrunc(WideDef, NarrowDef->getType()); - NarrowUse->replaceUsesOfWith(NarrowDef, Trunc); - return 0; + truncateIVUse(DU, DT); + return nullptr; } - // We assume that block terminators are not SCEVable. We wouldn't want to + // Assume block terminators cannot evaluate to a recurrence. We can't to // insert a Trunc after a terminator if there happens to be a critical edge. - assert(NarrowUse != NarrowUse->getParent()->getTerminator() && + assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() && "SCEV is not expected to evaluate a block terminator"); // Reuse the IV increment that SCEVExpander created as long as it dominates // NarrowUse. - Instruction *WideUse = 0; - if (WideAddRec == WideIncExpr && HoistStep(WideInc, NarrowUse, DT)) { + Instruction *WideUse = nullptr; + if (WideAddRec == WideIncExpr + && Rewriter.hoistIVInc(WideInc, DU.NarrowUse)) WideUse = WideInc; - } else { - WideUse = CloneIVUser(NarrowUse, NarrowDef, WideDef); + WideUse = CloneIVUser(DU); if (!WideUse) - return 0; + return nullptr; } // Evaluation of WideAddRec ensured that the narrow expression could be // extended outside the loop without overflow. This suggests that the wide use @@ -1039,7 +1028,7 @@ Instruction *WidenIV::WidenIVUse(Use &NarrowDefUse, Instruction *NarrowDef, DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n"); DeadInsts.push_back(WideUse); - return 0; + return nullptr; } // Returning WideUse pushes it on the worklist. @@ -1049,15 +1038,14 @@ Instruction *WidenIV::WidenIVUse(Use &NarrowDefUse, Instruction *NarrowDef, /// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers. 
/// void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) { - for (Value::use_iterator UI = NarrowDef->use_begin(), - UE = NarrowDef->use_end(); UI != UE; ++UI) { - Use &U = UI.getUse(); + for (User *U : NarrowDef->users()) { + Instruction *NarrowUser = cast(U); // Handle data flow merges and bizarre phi cycles. - if (!Widened.insert(cast(U.getUser()))) + if (!Widened.insert(NarrowUser)) continue; - NarrowIVUsers.push_back(std::make_pair(&UI.getUse(), WideDef)); + NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef)); } } @@ -1075,7 +1063,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) { // Is this phi an induction variable? const SCEVAddRecExpr *AddRec = dyn_cast(SE->getSCEV(OrigPhi)); if (!AddRec) - return NULL; + return nullptr; // Widen the induction variable expression. const SCEV *WideIVExpr = IsSigned ? @@ -1088,7 +1076,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) { // Can the IV be extended outside the loop without overflow? AddRec = dyn_cast(WideIVExpr); if (!AddRec || AddRec->getLoop() != L) - return NULL; + return nullptr; // An AddRec must have loop-invariant operands. Since this AddRec is // materialized by a loop header phi, the expression cannot have any post-loop @@ -1124,212 +1112,65 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) { pushNarrowIVUsers(OrigPhi, WidePhi); while (!NarrowIVUsers.empty()) { - Use *UsePtr; - Instruction *WideDef; - tie(UsePtr, WideDef) = NarrowIVUsers.pop_back_val(); - Use &NarrowDefUse = *UsePtr; + NarrowIVDefUse DU = NarrowIVUsers.pop_back_val(); // Process a def-use edge. This may replace the use, so don't hold a // use_iterator across it. - Instruction *NarrowDef = cast(NarrowDefUse.get()); - Instruction *WideUse = WidenIVUse(NarrowDefUse, NarrowDef, WideDef); + Instruction *WideUse = WidenIVUse(DU, Rewriter); // Follow all def-use edges from the previous narrow use. if (WideUse) - pushNarrowIVUsers(cast(NarrowDefUse.getUser()), WideUse); + pushNarrowIVUsers(DU.NarrowUse, WideUse); // WidenIVUse may have removed the def-use edge. - if (NarrowDef->use_empty()) - DeadInsts.push_back(NarrowDef); + if (DU.NarrowDef->use_empty()) + DeadInsts.push_back(DU.NarrowDef); } return WidePhi; } //===----------------------------------------------------------------------===// -// Simplification of IV users based on SCEV evaluation. +// Live IV Reduction - Minimize IVs live across the loop. //===----------------------------------------------------------------------===// -void IndVarSimplify::EliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) { - unsigned IVOperIdx = 0; - ICmpInst::Predicate Pred = ICmp->getPredicate(); - if (IVOperand != ICmp->getOperand(0)) { - // Swapped - assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand"); - IVOperIdx = 1; - Pred = ICmpInst::getSwappedPredicate(Pred); - } - - // Get the SCEVs for the ICmp operands. - const SCEV *S = SE->getSCEV(ICmp->getOperand(IVOperIdx)); - const SCEV *X = SE->getSCEV(ICmp->getOperand(1 - IVOperIdx)); - - // Simplify unnecessary loops away. - const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent()); - S = SE->getSCEVAtScope(S, ICmpLoop); - X = SE->getSCEVAtScope(X, ICmpLoop); - - // If the condition is always true or always false, replace it with - // a constant value. 
- if (SE->isKnownPredicate(Pred, S, X)) - ICmp->replaceAllUsesWith(ConstantInt::getTrue(ICmp->getContext())); - else if (SE->isKnownPredicate(ICmpInst::getInversePredicate(Pred), S, X)) - ICmp->replaceAllUsesWith(ConstantInt::getFalse(ICmp->getContext())); - else - return; - - DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n'); - ++NumElimCmp; - Changed = true; - DeadInsts.push_back(ICmp); -} - -void IndVarSimplify::EliminateIVRemainder(BinaryOperator *Rem, - Value *IVOperand, - bool IsSigned) { - // We're only interested in the case where we know something about - // the numerator. - if (IVOperand != Rem->getOperand(0)) - return; - - // Get the SCEVs for the ICmp operands. - const SCEV *S = SE->getSCEV(Rem->getOperand(0)); - const SCEV *X = SE->getSCEV(Rem->getOperand(1)); - - // Simplify unnecessary loops away. - const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent()); - S = SE->getSCEVAtScope(S, ICmpLoop); - X = SE->getSCEVAtScope(X, ICmpLoop); - - // i % n --> i if i is in [0,n). - if ((!IsSigned || SE->isKnownNonNegative(S)) && - SE->isKnownPredicate(IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, - S, X)) - Rem->replaceAllUsesWith(Rem->getOperand(0)); - else { - // (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n). - const SCEV *LessOne = - SE->getMinusSCEV(S, SE->getConstant(S->getType(), 1)); - if (IsSigned && !SE->isKnownNonNegative(LessOne)) - return; - - if (!SE->isKnownPredicate(IsSigned ? - ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, - LessOne, X)) - return; - ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, - Rem->getOperand(0), Rem->getOperand(1), - "tmp"); - SelectInst *Sel = - SelectInst::Create(ICmp, - ConstantInt::get(Rem->getType(), 0), - Rem->getOperand(0), "tmp", Rem); - Rem->replaceAllUsesWith(Sel); - } +//===----------------------------------------------------------------------===// +// Simplification of IV users based on SCEV evaluation. +//===----------------------------------------------------------------------===// - // Inform IVUsers about the new users. - if (IU) { - if (Instruction *I = dyn_cast(Rem->getOperand(0))) - IU->AddUsersIfInteresting(I); - } - DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n'); - ++NumElimRem; - Changed = true; - DeadInsts.push_back(Rem); -} +namespace { + class IndVarSimplifyVisitor : public IVVisitor { + ScalarEvolution *SE; + const DataLayout *DL; + PHINode *IVPhi; -/// EliminateIVUser - Eliminate an operation that consumes a simple IV and has -/// no observable side-effect given the range of IV values. -bool IndVarSimplify::EliminateIVUser(Instruction *UseInst, - Instruction *IVOperand) { - if (ICmpInst *ICmp = dyn_cast(UseInst)) { - EliminateIVComparison(ICmp, IVOperand); - return true; - } - if (BinaryOperator *Rem = dyn_cast(UseInst)) { - bool IsSigned = Rem->getOpcode() == Instruction::SRem; - if (IsSigned || Rem->getOpcode() == Instruction::URem) { - EliminateIVRemainder(Rem, IVOperand, IsSigned); - return true; + public: + WideIVInfo WI; + + IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV, + const DataLayout *DL, const DominatorTree *DTree): + SE(SCEV), DL(DL), IVPhi(IV) { + DT = DTree; + WI.NarrowIV = IVPhi; + if (ReduceLiveIVs) + setSplitOverflowIntrinsics(); } - } - - // Eliminate any operation that SCEV can prove is an identity function. 
- if (!SE->isSCEVable(UseInst->getType()) || - (UseInst->getType() != IVOperand->getType()) || - (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand))) - return false; - - DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n'); - - UseInst->replaceAllUsesWith(IVOperand); - ++NumElimIdentity; - Changed = true; - DeadInsts.push_back(UseInst); - return true; -} - -/// pushIVUsers - Add all uses of Def to the current IV's worklist. -/// -static void pushIVUsers( - Instruction *Def, - SmallPtrSet &Simplified, - SmallVectorImpl< std::pair > &SimpleIVUsers) { - - for (Value::use_iterator UI = Def->use_begin(), E = Def->use_end(); - UI != E; ++UI) { - Instruction *User = cast(*UI); - - // Avoid infinite or exponential worklist processing. - // Also ensure unique worklist users. - // If Def is a LoopPhi, it may not be in the Simplified set, so check for - // self edges first. - if (User != Def && Simplified.insert(User)) - SimpleIVUsers.push_back(std::make_pair(User, Def)); - } -} - -/// isSimpleIVUser - Return true if this instruction generates a simple SCEV -/// expression in terms of that IV. -/// -/// This is similar to IVUsers' isInsteresting() but processes each instruction -/// non-recursively when the operand is already known to be a simpleIVUser. -/// -static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) { - if (!SE->isSCEVable(I->getType())) - return false; - - // Get the symbolic expression for this instruction. - const SCEV *S = SE->getSCEV(I); - - // We assume that terminators are not SCEVable. - assert((!S || I != I->getParent()->getTerminator()) && - "can't fold terminators"); - // Only consider affine recurrences. - const SCEVAddRecExpr *AR = dyn_cast(S); - if (AR && AR->getLoop() == L) - return true; - - return false; + // Implement the interface used by simplifyUsersOfIV. + void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, DL); } + }; } -/// SimplifyIVUsersNoRewrite - Iteratively perform simplification on a worklist -/// of IV users. Each successive simplification may push more users which may +/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV +/// users. Each successive simplification may push more users which may /// themselves be candidates for simplification. /// -/// The "NoRewrite" algorithm does not require IVUsers analysis. Instead, it -/// simplifies instructions in-place during analysis. Rather than rewriting -/// induction variables bottom-up from their users, it transforms a chain of -/// IVUsers top-down, updating the IR only when it encouters a clear -/// optimization opportunitiy. A SCEVExpander "Rewriter" instance is still -/// needed, but only used to generate a new IV (phi) of wider type for sign/zero -/// extend elimination. -/// -/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers. +/// Sign/Zero extend elimination is interleaved with IV simplification. /// -void IndVarSimplify::SimplifyIVUsersNoRewrite(Loop *L, SCEVExpander &Rewriter) { - std::map WideIVMap; +void IndVarSimplify::SimplifyAndExtend(Loop *L, + SCEVExpander &Rewriter, + LPPassManager &LPM) { + SmallVector WideIVs; SmallVector LoopPhis; for (BasicBlock::iterator I = L->getHeader()->begin(); isa(I); ++I) { @@ -1345,112 +1186,96 @@ void IndVarSimplify::SimplifyIVUsersNoRewrite(Loop *L, SCEVExpander &Rewriter) { // extension. The first time SCEV attempts to normalize sign/zero extension, // the result becomes final. 
So for the most predictable results, we delay // evaluation of sign/zero extend evaluation until needed, and avoid running - // other SCEV based analysis prior to SimplifyIVUsersNoRewrite. + // other SCEV based analysis prior to SimplifyAndExtend. do { PHINode *CurrIV = LoopPhis.pop_back_val(); // Information about sign/zero extensions of CurrIV. - WideIVInfo WI; - - // Instructions processed by SimplifyIVUsers for CurrIV. - SmallPtrSet Simplified; + IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT); - // Use-def pairs if IV users waiting to be processed for CurrIV. - SmallVector, 8> SimpleIVUsers; + Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor); - // Push users of the current LoopPhi. In rare cases, pushIVUsers may be - // called multiple times for the same LoopPhi. This is the proper thing to - // do for loop header phis that use each other. - pushIVUsers(CurrIV, Simplified, SimpleIVUsers); - - while (!SimpleIVUsers.empty()) { - Instruction *UseInst, *Operand; - tie(UseInst, Operand) = SimpleIVUsers.pop_back_val(); - // Bypass back edges to avoid extra work. - if (UseInst == CurrIV) continue; - - if (EliminateIVUser(UseInst, Operand)) { - pushIVUsers(Operand, Simplified, SimpleIVUsers); - continue; - } - if (CastInst *Cast = dyn_cast(UseInst)) { - bool IsSigned = Cast->getOpcode() == Instruction::SExt; - if (IsSigned || Cast->getOpcode() == Instruction::ZExt) { - CollectExtend(Cast, IsSigned, WI, SE, TD); - } - continue; - } - if (isSimpleIVUser(UseInst, L, SE)) { - pushIVUsers(UseInst, Simplified, SimpleIVUsers); - } - } - if (WI.WidestNativeType) { - WideIVMap[CurrIV] = WI; + if (Visitor.WI.WidestNativeType) { + WideIVs.push_back(Visitor.WI); } } while(!LoopPhis.empty()); - for (std::map::const_iterator I = WideIVMap.begin(), - E = WideIVMap.end(); I != E; ++I) { - WidenIV Widener(I->first, I->second, LI, SE, DT, DeadInsts); + for (; !WideIVs.empty(); WideIVs.pop_back()) { + WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts); if (PHINode *WidePhi = Widener.CreateWideIV(Rewriter)) { Changed = true; LoopPhis.push_back(WidePhi); } } - WideIVMap.clear(); } } -/// SimplifyCongruentIVs - Check for congruent phis in this loop header and -/// populate ExprToIVMap for use later. -/// -void IndVarSimplify::SimplifyCongruentIVs(Loop *L) { - DenseMap ExprToIVMap; - for (BasicBlock::iterator I = L->getHeader()->begin(); isa(I); ++I) { - PHINode *Phi = cast(I); - if (!SE->isSCEVable(Phi->getType())) - continue; +//===----------------------------------------------------------------------===// +// LinearFunctionTestReplace and its kin. Rewrite the loop exit condition. +//===----------------------------------------------------------------------===// - const SCEV *S = SE->getSCEV(Phi); - DenseMap::const_iterator Pos; - bool Inserted; - tie(Pos, Inserted) = ExprToIVMap.insert(std::make_pair(S, Phi)); - if (Inserted) - continue; - PHINode *OrigPhi = Pos->second; - // Replacing the congruent phi is sufficient because acyclic redundancy - // elimination, CSE/GVN, should handle the rest. However, once SCEV proves - // that a phi is congruent, it's almost certain to be the head of an IV - // user cycle that is isomorphic with the original phi. So it's worth - // eagerly cleaning up the common case of a single IV increment. 
-    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
-      Instruction *OrigInc =
-        cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
-      Instruction *IsomorphicInc =
-        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
-      if (OrigInc != IsomorphicInc &&
-          SE->getSCEV(OrigInc) == SE->getSCEV(IsomorphicInc) &&
-          HoistStep(OrigInc, IsomorphicInc, DT)) {
-        DEBUG(dbgs() << "INDVARS: Eliminated congruent iv.inc: "
-              << *IsomorphicInc << '\n');
-        IsomorphicInc->replaceAllUsesWith(OrigInc);
-        DeadInsts.push_back(IsomorphicInc);
-      }
+/// Check for expressions that ScalarEvolution generates to compute
+/// BackedgeTakenInfo. If these expressions have not been reduced, then
+/// expanding them may incur additional cost (albeit in the loop preheader).
+static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
+                                SmallPtrSetImpl<const SCEV*> &Processed,
+                                ScalarEvolution *SE) {
+  if (!Processed.insert(S))
+    return false;
+
+  // If the backedge-taken count is a UDiv, it's very likely a UDiv that
+  // ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
+  // precise expression, rather than a UDiv from the user's code. If we can't
+  // find a UDiv in the code with some simple searching, assume the former and
+  // forego rewriting the loop.
+  if (isa<SCEVUDivExpr>(S)) {
+    ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
+    if (!OrigCond) return true;
+    const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
+    R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
+    if (R != S) {
+      const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
+      L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
+      if (L != S)
+        return true;
     }
-    DEBUG(dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
-    ++NumElimIV;
-    Phi->replaceAllUsesWith(OrigPhi);
-    DeadInsts.push_back(Phi);
   }
-}
-//===----------------------------------------------------------------------===//
-// LinearFunctionTestReplace and its kin. Rewrite the loop exit condition.
-//===----------------------------------------------------------------------===//
+  // Recurse past add expressions, which commonly occur in the
+  // BackedgeTakenCount. They may already exist in program code, and if not,
+  // they are not too expensive to rematerialize.
+  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+         I != E; ++I) {
+      if (isHighCostExpansion(*I, BI, Processed, SE))
+        return true;
+    }
+    return false;
+  }
+
+  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
+  // the exit condition.
+  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
+    return true;
+
+  // If we haven't recognized an expensive SCEV pattern, assume it's an
+  // expression produced by program code.
+  return false;
+}
 
 /// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
 /// count expression can be safely and cheaply expanded into an instruction
 /// sequence that can be used by LinearFunctionTestReplace.
+///
+/// TODO: This fails for pointer-type loop counters with greater than one byte
+/// strides, consequently preventing LFTR from running. For the purpose of LFTR
+/// we could skip this check in the case that the LFTR loop counter (chosen by
+/// FindLoopCounter) is also pointer type. Instead, we could directly convert
+/// the loop test to an inequality test by checking the target data's alignment
+/// of element types (given that the initial pointer value originates from or is
+/// used by ABI constrained operation, as opposed to inttoptr/ptrtoint).
+/// However, we don't yet have a strong motivation for converting loop tests +/// into inequality tests. static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) { const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); if (isa(BackedgeTakenCount) || @@ -1465,54 +1290,335 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) { if (!BI) return false; - // Special case: If the backedge-taken count is a UDiv, it's very likely a - // UDiv that ScalarEvolution produced in order to compute a precise - // expression, rather than a UDiv from the user's code. If we can't find a - // UDiv in the code with some simple searching, assume the former and forego - // rewriting the loop. - if (isa(BackedgeTakenCount)) { - ICmpInst *OrigCond = dyn_cast(BI->getCondition()); - if (!OrigCond) return false; - const SCEV *R = SE->getSCEV(OrigCond->getOperand(1)); - R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1)); - if (R != BackedgeTakenCount) { - const SCEV *L = SE->getSCEV(OrigCond->getOperand(0)); - L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1)); - if (L != BackedgeTakenCount) - return false; - } - } + SmallPtrSet Processed; + if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE)) + return false; + return true; } -/// getBackedgeIVType - Get the widest type used by the loop test after peeking -/// through Truncs. -/// -/// TODO: Unnecessary if LFTR does not force a canonical IV. -static const Type *getBackedgeIVType(Loop *L) { - if (!L->getExitingBlock()) - return 0; +/// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop +/// invariant value to the phi. +static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) { + Instruction *IncI = dyn_cast(IncV); + if (!IncI) + return nullptr; + + switch (IncI->getOpcode()) { + case Instruction::Add: + case Instruction::Sub: + break; + case Instruction::GetElementPtr: + // An IV counter must preserve its type. + if (IncI->getNumOperands() == 2) + break; + default: + return nullptr; + } + + PHINode *Phi = dyn_cast(IncI->getOperand(0)); + if (Phi && Phi->getParent() == L->getHeader()) { + if (isLoopInvariant(IncI->getOperand(1), L, DT)) + return Phi; + return nullptr; + } + if (IncI->getOpcode() == Instruction::GetElementPtr) + return nullptr; + + // Allow add/sub to be commuted. + Phi = dyn_cast(IncI->getOperand(1)); + if (Phi && Phi->getParent() == L->getHeader()) { + if (isLoopInvariant(IncI->getOperand(0), L, DT)) + return Phi; + } + return nullptr; +} + +/// Return the compare guarding the loop latch, or NULL for unrecognized tests. +static ICmpInst *getLoopTest(Loop *L) { + assert(L->getExitingBlock() && "expected loop exit"); + + BasicBlock *LatchBlock = L->getLoopLatch(); + // Don't bother with LFTR if the loop is not properly simplified. + if (!LatchBlock) + return nullptr; - // Can't rewrite non-branch yet. BranchInst *BI = dyn_cast(L->getExitingBlock()->getTerminator()); - if (!BI) - return 0; + assert(BI && "expected exit branch"); + + return dyn_cast(BI->getCondition()); +} - ICmpInst *Cond = dyn_cast(BI->getCondition()); +/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show +/// that the current exit test is already sufficiently canonical. +static bool needsLFTR(Loop *L, DominatorTree *DT) { + // Do LFTR to simplify the exit condition to an ICMP. 
+ ICmpInst *Cond = getLoopTest(L); if (!Cond) - return 0; - - const Type *Ty = 0; - for(User::op_iterator OI = Cond->op_begin(), OE = Cond->op_end(); - OI != OE; ++OI) { - assert((!Ty || Ty == (*OI)->getType()) && "bad icmp operand types"); - TruncInst *Trunc = dyn_cast(*OI); - if (!Trunc) + return true; + + // Do LFTR to simplify the exit ICMP to EQ/NE + ICmpInst::Predicate Pred = Cond->getPredicate(); + if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ) + return true; + + // Look for a loop invariant RHS + Value *LHS = Cond->getOperand(0); + Value *RHS = Cond->getOperand(1); + if (!isLoopInvariant(RHS, L, DT)) { + if (!isLoopInvariant(LHS, L, DT)) + return true; + std::swap(LHS, RHS); + } + // Look for a simple IV counter LHS + PHINode *Phi = dyn_cast(LHS); + if (!Phi) + Phi = getLoopPhiForCounter(LHS, L, DT); + + if (!Phi) + return true; + + // Do LFTR if PHI node is defined in the loop, but is *not* a counter. + int Idx = Phi->getBasicBlockIndex(L->getLoopLatch()); + if (Idx < 0) + return true; + + // Do LFTR if the exit condition's IV is *not* a simple counter. + Value *IncV = Phi->getIncomingValue(Idx); + return Phi != getLoopPhiForCounter(IncV, L, DT); +} + +/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils +/// down to checking that all operands are constant and listing instructions +/// that may hide undef. +static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl &Visited, + unsigned Depth) { + if (isa(V)) + return !isa(V); + + if (Depth >= 6) + return false; + + // Conservatively handle non-constant non-instructions. For example, Arguments + // may be undef. + Instruction *I = dyn_cast(V); + if (!I) + return false; + + // Load and return values may be undef. + if(I->mayReadFromMemory() || isa(I) || isa(I)) + return false; + + // Optimistically handle other instructions. + for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) { + if (!Visited.insert(*OI)) + continue; + if (!hasConcreteDefImpl(*OI, Visited, Depth+1)) + return false; + } + return true; +} + +/// Return true if the given value is concrete. We must prove that undef can +/// never reach it. +/// +/// TODO: If we decide that this is a good approach to checking for undef, we +/// may factor it into a common location. +static bool hasConcreteDef(Value *V) { + SmallPtrSet Visited; + Visited.insert(V); + return hasConcreteDefImpl(V, Visited, 0); +} + +/// AlmostDeadIV - Return true if this IV has any uses other than the (soon to +/// be rewritten) loop exit test. +static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) { + int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); + Value *IncV = Phi->getIncomingValue(LatchIdx); + + for (User *U : Phi->users()) + if (U != Cond && U != IncV) return false; + + for (User *U : IncV->users()) + if (U != Cond && U != Phi) return false; + return true; +} + +/// FindLoopCounter - Find an affine IV in canonical form. +/// +/// BECount may be an i8* pointer type. The pointer difference is already +/// valid count without scaling the address stride, so it remains a pointer +/// expression as far as SCEV is concerned. +/// +/// Currently only valid for LFTR. See the comments on hasConcreteDef below. +/// +/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount +/// +/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride. +/// This is difficult in general for SCEV because of potential overflow. But we +/// could at least handle constant BECounts. 
+static PHINode * +FindLoopCounter(Loop *L, const SCEV *BECount, + ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) { + uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType()); + + Value *Cond = + cast(L->getExitingBlock()->getTerminator())->getCondition(); + + // Loop over all of the PHI nodes, looking for a simple counter. + PHINode *BestPhi = nullptr; + const SCEV *BestInit = nullptr; + BasicBlock *LatchBlock = L->getLoopLatch(); + assert(LatchBlock && "needsLFTR should guarantee a loop latch"); + + for (BasicBlock::iterator I = L->getHeader()->begin(); isa(I); ++I) { + PHINode *Phi = cast(I); + if (!SE->isSCEVable(Phi->getType())) + continue; + + // Avoid comparing an integer IV against a pointer Limit. + if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy()) + continue; + + const SCEVAddRecExpr *AR = dyn_cast(SE->getSCEV(Phi)); + if (!AR || AR->getLoop() != L || !AR->isAffine()) + continue; + + // AR may be a pointer type, while BECount is an integer type. + // AR may be wider than BECount. With eq/ne tests overflow is immaterial. + // AR may not be a narrower type, or we may never exit. + uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType()); + if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth))) + continue; + + const SCEV *Step = dyn_cast(AR->getStepRecurrence(*SE)); + if (!Step || !Step->isOne()) + continue; + + int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); + Value *IncV = Phi->getIncomingValue(LatchIdx); + if (getLoopPhiForCounter(IncV, L, DT) != Phi) continue; - return Trunc->getSrcTy(); + // Avoid reusing a potentially undef value to compute other values that may + // have originally had a concrete definition. + if (!hasConcreteDef(Phi)) { + // We explicitly allow unknown phis as long as they are already used by + // the loop test. In this case we assume that performing LFTR could not + // increase the number of undef users. + if (ICmpInst *Cond = getLoopTest(L)) { + if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) + && Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) { + continue; + } + } + } + const SCEV *Init = AR->getStart(); + + if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) { + // Don't force a live loop counter if another IV can be used. + if (AlmostDeadIV(Phi, LatchBlock, Cond)) + continue; + + // Prefer to count-from-zero. This is a more "canonical" counter form. It + // also prefers integer to pointer IVs. + if (BestInit->isZero() != Init->isZero()) { + if (BestInit->isZero()) + continue; + } + // If two IVs both count from zero or both count from nonzero then the + // narrower is likely a dead phi that has been widened. Use the wider phi + // to allow the other to be eliminated. + else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType())) + continue; + } + BestPhi = Phi; + BestInit = Init; + } + return BestPhi; +} + +/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that +/// holds the RHS of the new loop test. +static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L, + SCEVExpander &Rewriter, ScalarEvolution *SE) { + const SCEVAddRecExpr *AR = dyn_cast(SE->getSCEV(IndVar)); + assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter"); + const SCEV *IVInit = AR->getStart(); + + // IVInit may be a pointer while IVCount is an integer when FindLoopCounter + // finds a valid pointer IV. Sign extend BECount in order to materialize a + // GEP. 
Avoid running SCEVExpander on a new pointer value, instead reusing
+  // the existing GEPs whenever possible.
+  if (IndVar->getType()->isPointerTy()
+      && !IVCount->getType()->isPointerTy()) {
+
+    // IVOffset will be the new GEP offset that is interpreted by GEP as a
+    // signed value. IVCount on the other hand represents the loop trip count,
+    // which is an unsigned value. FindLoopCounter only allows induction
+    // variables that have a positive unit stride of one. This means we don't
+    // have to handle the case of negative offsets (yet) and just need to zero
+    // extend IVCount.
+    Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
+    const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy);
+
+    // Expand the code for the iteration count.
+    assert(SE->isLoopInvariant(IVOffset, L) &&
+           "Computed iteration count is not loop invariant!");
+    BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
+    Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);
+
+    Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
+    assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
+    // We could handle pointer IVs other than i8*, but we need to compensate for
+    // gep index scaling. See canExpandBackedgeTakenCount comments.
+    assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
+             cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
+           && "unit stride pointer IV must be i8*");
+
+    IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
+    return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
+  }
+  else {
+    // In any other case, convert both IVInit and IVCount to integers before
+    // comparing. This may result in SCEV expansion of pointers, but in practice
+    // SCEV will fold the pointer arithmetic away as follows:
+    // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
+    //
+    // Valid Cases: (1) both integers is most common; (2) both may be pointers
+    // for simple memset-style loops.
+    //
+    // IVInit integer and IVCount pointer would only occur if a canonical IV
+    // were generated on top of case #2, which is not expected.
+
+    const SCEV *IVLimit = nullptr;
+    // For unit stride, IVCount = Start + BECount with 2's complement overflow.
+    // For non-zero Start, compute IVCount here.
+    if (AR->getStart()->isZero())
+      IVLimit = IVCount;
+    else {
+      assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
+      const SCEV *IVInit = AR->getStart();
+
+      // For integer IVs, truncate the IV before computing IVInit + BECount.
+      if (SE->getTypeSizeInBits(IVInit->getType())
+          > SE->getTypeSizeInBits(IVCount->getType()))
+        IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
+
+      IVLimit = SE->getAddExpr(IVInit, IVCount);
+    }
+    // Expand the code for the iteration count.
+    BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
+    IRBuilder<> Builder(BI);
+    assert(SE->isLoopInvariant(IVLimit, L) &&
+           "Computed iteration count is not loop invariant!");
+    // Ensure that we generate the same type as IndVar, or a smaller integer
+    // type. In the presence of null pointer values, we have an integer type
+    // SCEV expression (IVInit) for a pointer type IV value (IndVar).
+    Type *LimitTy = IVCount->getType()->isPointerTy() ?
+      IndVar->getType() : IVCount->getType();
+    return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
   }
-  return Ty;
 }
 
 /// LinearFunctionTestReplace - This method rewrites the exit condition of the
@@ -1520,70 +1626,102 @@ static const Type *getBackedgeIVType(Loop *L) {
 /// variable.
This pass is able to rewrite the exit tests of any loop where the /// SCEV analysis can determine a loop-invariant trip count of the loop, which /// is actually a much broader range than just linear tests. -ICmpInst *IndVarSimplify:: +Value *IndVarSimplify:: LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, PHINode *IndVar, SCEVExpander &Rewriter) { assert(canExpandBackedgeTakenCount(L, SE) && "precondition"); - BranchInst *BI = cast(L->getExitingBlock()->getTerminator()); - // If the exiting block is not the same as the backedge block, we must compare - // against the preincremented value, otherwise we prefer to compare against - // the post-incremented value. - Value *CmpIndVar; - const SCEV *RHS = BackedgeTakenCount; - if (L->getExitingBlock() == L->getLoopLatch()) { - // Add one to the "backedge-taken" count to get the trip count. - // If this addition may overflow, we have to be more pessimistic and - // cast the induction variable before doing the add. - const SCEV *Zero = SE->getConstant(BackedgeTakenCount->getType(), 0); - const SCEV *N = - SE->getAddExpr(BackedgeTakenCount, - SE->getConstant(BackedgeTakenCount->getType(), 1)); - if ((isa(N) && !N->isZero()) || - SE->isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, N, Zero)) { - // No overflow. Cast the sum. - RHS = SE->getTruncateOrZeroExtend(N, IndVar->getType()); - } else { - // Potential overflow. Cast before doing the add. - RHS = SE->getTruncateOrZeroExtend(BackedgeTakenCount, - IndVar->getType()); - RHS = SE->getAddExpr(RHS, - SE->getConstant(IndVar->getType(), 1)); - } + // Initialize CmpIndVar and IVCount to their preincremented values. + Value *CmpIndVar = IndVar; + const SCEV *IVCount = BackedgeTakenCount; + // If the exiting block is the same as the backedge block, we prefer to + // compare against the post-incremented value, otherwise we must compare + // against the preincremented value. + if (L->getExitingBlock() == L->getLoopLatch()) { // The BackedgeTaken expression contains the number of times that the // backedge branches to the loop header. This is one less than the // number of times the loop executes, so use the incremented indvar. - CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock()); - } else { - // We have to use the preincremented value... - RHS = SE->getTruncateOrZeroExtend(BackedgeTakenCount, - IndVar->getType()); - CmpIndVar = IndVar; + llvm::Value *IncrementedIndvar = + IndVar->getIncomingValueForBlock(L->getExitingBlock()); + const auto *IncrementedIndvarSCEV = + cast(SE->getSCEV(IncrementedIndvar)); + // It is unsafe to use the incremented indvar if it has a wrapping flag, we + // don't want to compare against a poison value. Check the SCEV that + // corresponds to the incremented indvar, the SCEVExpander will only insert + // flags in the IR if the SCEV originally had wrapping flags. + // FIXME: In theory, SCEV could drop flags even though they exist in IR. + // A more robust solution would involve getting a new expression for + // CmpIndVar by applying non-NSW/NUW AddExprs. + if (!ScalarEvolution::maskFlags(IncrementedIndvarSCEV->getNoWrapFlags(), + SCEV::FlagNUW | SCEV::FlagNSW)) { + // Add one to the "backedge-taken" count to get the trip count. + // This addition may overflow, which is valid as long as the comparison is + // truncated to BackedgeTakenCount->getType(). + IVCount = + SE->getAddExpr(BackedgeTakenCount, + SE->getConstant(BackedgeTakenCount->getType(), 1)); + CmpIndVar = IncrementedIndvar; + } } - // Expand the code for the iteration count. 
- assert(SE->isLoopInvariant(RHS, L) && - "Computed iteration count is not loop invariant!"); - Value *ExitCnt = Rewriter.expandCodeFor(RHS, IndVar->getType(), BI); + Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE); + assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy() + && "genLoopLimit missed a cast"); // Insert a new icmp_ne or icmp_eq instruction before the branch. - ICmpInst::Predicate Opcode; + BranchInst *BI = cast(L->getExitingBlock()->getTerminator()); + ICmpInst::Predicate P; if (L->contains(BI->getSuccessor(0))) - Opcode = ICmpInst::ICMP_NE; + P = ICmpInst::ICMP_NE; else - Opcode = ICmpInst::ICMP_EQ; + P = ICmpInst::ICMP_EQ; DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n" << " LHS:" << *CmpIndVar << '\n' << " op:\t" - << (Opcode == ICmpInst::ICMP_NE ? "!=" : "==") << "\n" - << " RHS:\t" << *RHS << "\n"); + << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n" + << " RHS:\t" << *ExitCnt << "\n" + << " IVCount:\t" << *IVCount << "\n"); + + IRBuilder<> Builder(BI); + + // LFTR can ignore IV overflow and truncate to the width of + // BECount. This avoids materializing the add(zext(add)) expression. + unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType()); + unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType()); + if (CmpIndVarSize > ExitCntSize) { + const SCEVAddRecExpr *AR = cast(SE->getSCEV(IndVar)); + const SCEV *ARStart = AR->getStart(); + const SCEV *ARStep = AR->getStepRecurrence(*SE); + // For constant IVCount, avoid truncation. + if (isa(ARStart) && isa(IVCount)) { + const APInt &Start = cast(ARStart)->getValue()->getValue(); + APInt Count = cast(IVCount)->getValue()->getValue(); + // Note that the post-inc value of BackedgeTakenCount may have overflowed + // above such that IVCount is now zero. + if (IVCount != BackedgeTakenCount && Count == 0) { + Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize); + ++Count; + } + else + Count = Count.zext(CmpIndVarSize); + APInt NewLimit; + if (cast(ARStep)->getValue()->isNegative()) + NewLimit = Start - Count; + else + NewLimit = Start + Count; + ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit); - ICmpInst *Cond = new ICmpInst(BI, Opcode, CmpIndVar, ExitCnt, "exitcond"); - Cond->setDebugLoc(BI->getDebugLoc()); + DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n"); + } else { + CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(), + "lftr.wideiv"); + } + } + Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond"); Value *OrigCond = BI->getCondition(); // It's tempting to use replaceAllUsesWith here to fully replace the old // comparison, but that's not immediately safe, since users of the old @@ -1612,7 +1750,7 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) { BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) return; - Instruction *InsertPt = ExitBlock->getFirstNonPHI(); + Instruction *InsertPt = ExitBlock->getFirstInsertionPt(); BasicBlock::iterator I = Preheader->getTerminator(); while (I != Preheader->begin()) { --I; @@ -1633,22 +1771,26 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) { if (isa(I)) continue; - // Don't sink static AllocaInsts out of the entry block, which would - // turn them into dynamic allocas! - if (AllocaInst *AI = dyn_cast(I)) - if (AI->isStaticAlloca()) - continue; + // Skip landingpad instructions. 
+ if (isa(I)) + continue; + + // Don't sink alloca: we never want to sink static alloca's out of the + // entry block, and correctly sinking dynamic alloca's requires + // checks for stacksave/stackrestore intrinsics. + // FIXME: Refactor this check somehow? + if (isa(I)) + continue; // Determine if there is a use in or before the loop (direct or // otherwise). bool UsedInLoop = false; - for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); - UI != UE; ++UI) { - User *U = *UI; - BasicBlock *UseBB = cast(U)->getParent(); - if (PHINode *P = dyn_cast(U)) { + for (Use &U : I->uses()) { + Instruction *User = cast(U.getUser()); + BasicBlock *UseBB = User->getParent(); + if (PHINode *P = dyn_cast(User)) { unsigned i = - PHINode::getIncomingValueNumForOperand(UI.getOperandNo()); + PHINode::getIncomingValueNumForOperand(U.getOperandNo()); UseBB = P->getIncomingBlock(i); } if (UseBB == Preheader || L->contains(UseBB)) { @@ -1688,6 +1830,9 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) { //===----------------------------------------------------------------------===// bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { + if (skipOptnoneFunction(L)) + return false; + // If LoopSimplify form is not available, stay out of trouble. Some notes: // - LSR currently only supports LoopSimplify-form loops. Indvars' // canonicalization can be a pessimization without LSR to "clean up" @@ -1699,12 +1844,12 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { if (!L->isLoopSimplifyForm()) return false; - if (!DisableIVRewrite) - IU = &getAnalysis(); LI = &getAnalysis(); SE = &getAnalysis(); - DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); + DT = &getAnalysis().getDomTree(); + DataLayoutPass *DLP = getAnalysisIfAvailable(); + DL = DLP ? &DLP->getDataLayout() : nullptr; + TLI = getAnalysisIfAvailable(); DeadInsts.clear(); Changed = false; @@ -1717,6 +1862,9 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { // Create a rewriter object which we'll use to transform the code with. SCEVExpander Rewriter(*SE, "indvars"); +#ifndef NDEBUG + Rewriter.setDebugType(DEBUG_TYPE); +#endif // Eliminate redundant IV users. // @@ -1724,10 +1872,8 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { // attempt to avoid evaluating SCEVs for sign/zero extend operations until // other expressions involving loop IVs have been evaluated. This helps SCEV // set no-wrap flags before normalizing sign/zero extension. - if (DisableIVRewrite) { - Rewriter.disableCanonicalMode(); - SimplifyIVUsersNoRewrite(L, Rewriter); - } + Rewriter.disableCanonicalMode(); + SimplifyAndExtend(L, Rewriter, LPM); // Check to see if this loop has a computable loop-invariant execution count. // If so, this means that we can compute the final value of any expressions @@ -1738,108 +1884,28 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { if (!isa(BackedgeTakenCount)) RewriteLoopExitValues(L, Rewriter); - // Eliminate redundant IV users. - if (!DisableIVRewrite) - SimplifyIVUsers(Rewriter); - // Eliminate redundant IV cycles. - if (DisableIVRewrite) - SimplifyCongruentIVs(L); - - // Compute the type of the largest recurrence expression, and decide whether - // a canonical induction variable should be inserted. 
- const Type *LargestType = 0; - bool NeedCannIV = false; - bool ExpandBECount = canExpandBackedgeTakenCount(L, SE); - if (ExpandBECount) { - // If we have a known trip count and a single exit block, we'll be - // rewriting the loop exit test condition below, which requires a - // canonical induction variable. - NeedCannIV = true; - const Type *Ty = BackedgeTakenCount->getType(); - if (DisableIVRewrite) { - // In this mode, SimplifyIVUsers may have already widened the IV used by - // the backedge test and inserted a Trunc on the compare's operand. Get - // the wider type to avoid creating a redundant narrow IV only used by the - // loop test. - LargestType = getBackedgeIVType(L); - } - if (!LargestType || - SE->getTypeSizeInBits(Ty) > - SE->getTypeSizeInBits(LargestType)) - LargestType = SE->getEffectiveSCEVType(Ty); - } - if (!DisableIVRewrite) { - for (IVUsers::const_iterator I = IU->begin(), E = IU->end(); I != E; ++I) { - NeedCannIV = true; - const Type *Ty = - SE->getEffectiveSCEVType(I->getOperandValToReplace()->getType()); - if (!LargestType || - SE->getTypeSizeInBits(Ty) > - SE->getTypeSizeInBits(LargestType)) - LargestType = Ty; - } - } - - // Now that we know the largest of the induction variable expressions - // in this loop, insert a canonical induction variable of the largest size. - PHINode *IndVar = 0; - if (NeedCannIV) { - // Check to see if the loop already has any canonical-looking induction - // variables. If any are present and wider than the planned canonical - // induction variable, temporarily remove them, so that the Rewriter - // doesn't attempt to reuse them. - SmallVector OldCannIVs; - while (PHINode *OldCannIV = L->getCanonicalInductionVariable()) { - if (SE->getTypeSizeInBits(OldCannIV->getType()) > - SE->getTypeSizeInBits(LargestType)) - OldCannIV->removeFromParent(); - else - break; - OldCannIVs.push_back(OldCannIV); - } - - IndVar = Rewriter.getOrInsertCanonicalInductionVariable(L, LargestType); - - ++NumInserted; - Changed = true; - DEBUG(dbgs() << "INDVARS: New CanIV: " << *IndVar << '\n'); - - // Now that the official induction variable is established, reinsert - // any old canonical-looking variables after it so that the IR remains - // consistent. They will be deleted as part of the dead-PHI deletion at - // the end of the pass. - while (!OldCannIVs.empty()) { - PHINode *OldCannIV = OldCannIVs.pop_back_val(); - OldCannIV->insertBefore(L->getHeader()->getFirstNonPHI()); - } - } + NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts); // If we have a trip count expression, rewrite the loop's exit condition // using it. We can currently only handle loops with a single exit. - ICmpInst *NewICmp = 0; - if (ExpandBECount) { - assert(canExpandBackedgeTakenCount(L, SE) && - "canonical IV disrupted BackedgeTaken expansion"); - assert(NeedCannIV && - "LinearFunctionTestReplace requires a canonical induction variable"); - // Check preconditions for proper SCEVExpander operation. SCEV does not - // express SCEVExpander's dependencies, such as LoopSimplify. Instead any - // pass that uses the SCEVExpander must do it. This does not work well for - // loop passes because SCEVExpander makes assumptions about all loops, while - // LoopPassManager only forces the current loop to be simplified. - // - // FIXME: SCEV expansion has no way to bail out, so the caller must - // explicitly check any assumptions made by SCEV. Brittle. 
- const SCEVAddRecExpr *AR = dyn_cast(BackedgeTakenCount); - if (!AR || AR->getLoop()->getLoopPreheader()) - NewICmp = - LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar, Rewriter); + if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) { + PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL); + if (IndVar) { + // Check preconditions for proper SCEVExpander operation. SCEV does not + // express SCEVExpander's dependencies, such as LoopSimplify. Instead any + // pass that uses the SCEVExpander must do it. This does not work well for + // loop passes because SCEVExpander makes assumptions about all loops, + // while LoopPassManager only forces the current loop to be simplified. + // + // FIXME: SCEV expansion has no way to bail out, so the caller must + // explicitly check any assumptions made by SCEV. Brittle. + const SCEVAddRecExpr *AR = dyn_cast(BackedgeTakenCount); + if (!AR || AR->getLoop()->getLoopPreheader()) + (void)LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar, + Rewriter); + } } - // Rewrite IV-derived expressions. - if (!DisableIVRewrite) - RewriteIVExpressions(L, Rewriter); - // Clear the rewriter cache, because values that are in the rewriter's cache // can be deleted in the loop below, causing the AssertingVH in the cache to // trigger. @@ -1850,7 +1916,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { while (!DeadInsts.empty()) if (Instruction *Inst = dyn_cast_or_null(&*DeadInsts.pop_back_val())) - RecursivelyDeleteTriviallyDeadInstructions(Inst); + RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI); // The Rewriter may not be used from this point on. @@ -1858,14 +1924,28 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { // loop may be sunk below the loop to reduce register pressure. SinkUnusedInvariants(L); - // For completeness, inform IVUsers of the IV use in the newly-created - // loop exit test instruction. - if (NewICmp && IU) - IU->AddUsersIfInteresting(cast(NewICmp->getOperand(0))); - // Clean up dead instructions. - Changed |= DeleteDeadPHIs(L->getHeader()); + Changed |= DeleteDeadPHIs(L->getHeader(), TLI); // Check a post-condition. - assert(L->isLCSSAForm(*DT) && "Indvars did not leave the loop in lcssa form!"); + assert(L->isLCSSAForm(*DT) && + "Indvars did not leave the loop in lcssa form!"); + + // Verify that LFTR, and any other change have not interfered with SCEV's + // ability to compute trip count. +#ifndef NDEBUG + if (VerifyIndvars && !isa(BackedgeTakenCount)) { + SE->forgetLoop(L); + const SCEV *NewBECount = SE->getBackedgeTakenCount(L); + if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) < + SE->getTypeSizeInBits(NewBECount->getType())) + NewBECount = SE->getTruncateOrNoop(NewBECount, + BackedgeTakenCount->getType()); + else + BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, + NewBECount->getType()); + assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV"); + } +#endif + return Changed; }
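
As a concrete illustration of the exit-test rewrite performed by LinearFunctionTestReplace and genLoopLimit above, here is a minimal, standalone C++ sketch at the source level. It is not taken from the patch; the function names (sumBefore, sumAfter) and the explicit clamp are illustrative assumptions only. It covers the common "both integers" case: the loop-invariant comparison is replaced by an (in)equality test of an existing unit-stride counter against a limit derived once from the backedge-taken count.

#include <cassert>

// Original loop: the exit test is a signed less-than compare against n.
static long sumBefore(const int *a, long n) {
  long s = 0;
  for (long i = 0; i < n; ++i)        // exit test: i < n
    s += a[i];
  return s;
}

// Shape after LFTR (illustrative): the trip count smax(n, 0) is computed
// once outside the loop, and the exit test becomes an inequality against
// that limit, mirroring the icmp ne that the pass inserts.
static long sumAfter(const int *a, long n) {
  long s = 0;
  const long limit = n < 0 ? 0 : n;   // loop limit materialized in the preheader
  for (long i = 0; i != limit; ++i)   // exit test: i != limit
    s += a[i];
  return s;
}

int main() {
  const int a[] = {1, 2, 3, 4, 5};
  assert(sumBefore(a, 5) == sumAfter(a, 5));
  assert(sumBefore(a, 0) == sumAfter(a, 0));
  assert(sumBefore(a, -3) == sumAfter(a, -3));
  return 0;
}

Both versions execute the loop body the same number of times for any n; the rewritten form simply uses the canonical equality-style exit test that later passes (and the verification under the verify-indvars flag) expect.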