//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into simpler forms suitable for subsequent
// analysis and transformation.
//
// If the trip count of a loop is computable, this pass also makes the following
// changes:
//   1. The exit condition for the loop is canonicalized to compare the
//      induction value against the exit value. This turns loops like:
//        'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
//   2. Any use outside of the loop of an expression derived from the indvar
//      is changed to compute the derived value outside of the loop, eliminating
//      the dependence on the exit value of the induction variable. If the only
//      purpose of the loop is to compute the exit value of some derived
//      expression, this transformation will make the loop dead.
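//      For example (schematic): in
//        'for (i = 0; i != 10; ++i) {}' followed by 'use(i)',
//      the post-loop use of 'i' is rewritten to 'use(10)'; if computing that
//      value was the loop's only purpose, the loop then becomes dead.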
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "indvars"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;
STATISTIC(NumWidened     , "Number of indvars widened");
STATISTIC(NumReplaced    , "Number of exit values replaced");
STATISTIC(NumLFTR        , "Number of loop exit tests replaced");
STATISTIC(NumElimExt     , "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV      , "Number of congruent IVs eliminated");
// Trip count verification can be enabled by default under NDEBUG if we
// implement a strong expression equivalence checker in SCEV. Until then, we
// use the verify-indvars flag, which may assert in some cases.
static cl::opt<bool> VerifyIndvars(
  "verify-indvars", cl::Hidden,
  cl::desc("Verify the ScalarEvolution result after running indvars"));

static cl::opt<bool> ReduceLiveIVs("liv-reduce", cl::Hidden,
  cl::desc("Reduce live induction variables."));
namespace {
  class IndVarSimplify : public LoopPass {
    LoopInfo        *LI;
    ScalarEvolution *SE;
    DominatorTree   *DT;
    DataLayout      *TD;
    TargetLibraryInfo *TLI;

    SmallVector<WeakVH, 16> DeadInsts;
    bool Changed;
  public:

    static char ID; // Pass identification, replacement for typeid
    IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
                       Changed(false) {
      initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
    }
    virtual bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfo>();
      AU.addRequired<ScalarEvolution>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreserved<ScalarEvolution>();
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreservedID(LCSSAID);
      AU.setPreservesCFG();
    }

  private:
    virtual void releaseMemory() {
      DeadInsts.clear();
    }

    bool isValidRewrite(Value *FromVal, Value *ToVal);
    void HandleFloatingPointIV(Loop *L, PHINode *PH);
    void RewriteNonIntegerIVs(Loop *L);

    void SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM);

    void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);

    Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
                                     PHINode *IndVar, SCEVExpander &Rewriter);

    void SinkUnusedInvariants(Loop *L);
  };
}
char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
                "Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(IndVarSimplify, "indvars",
                "Induction Variable Simplification", false, false)

Pass *llvm::createIndVarSimplifyPass() {
  return new IndVarSimplify();
}
/// isValidRewrite - Return true if the SCEV expansion generated by the
/// rewriter can replace the original value. SCEV guarantees that it
/// produces the same value, but the way it is produced may be illegal IR.
/// Ideally, this function will only be called for verification.
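/// For example (schematic, names invented): if expansion rewrites
/// 'gep %obj1, %iv' into 'gep %obj2, %off' so that the base pointer no longer
/// refers to the same underlying object, this check rejects the rewrite.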
bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers. Until then, we must
  // bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(FromVal)) {
    FromPtr = GEP->getPointerOperand();
  }
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(ToVal)) {
    ToPtr = GEP->getPointerOperand();
  }
  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    DEBUG(dbgs() << "INDVARS: GEP rewrite bail out "
          << *FromBase << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}
/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = 0;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);
    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }
  assert(InsertPt && "Missing phi operand");
  assert((!isa<Instruction>(Def) ||
          DT->dominates(cast<Instruction>(Def), InsertPt)) &&
         "def does not dominate all uses");
  return InsertPt;
}
//===----------------------------------------------------------------------===//
// RewriteNonIntegerIVs and helpers. Prefer integer IVs.
//===----------------------------------------------------------------------===//

/// ConvertToSInt - Convert APF to an integer, if possible.
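/// For example, an APFloat of 42.0 converts exactly to 42 and succeeds, while
/// 42.5 (inexact) or a value outside the signed 64-bit range fails.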
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
  bool isExact = false;
  // See if we can convert this to an int64_t
  uint64_t UIntVal;
  if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
                           &isExact) != APFloat::opOK || !isExact)
    return false;
  IntVal = UIntVal;
  return true;
}
/// HandleFloatingPointIV - If the loop has a floating-point induction
/// variable, then insert a corresponding integer induction variable if
/// possible. For example,
/// for(double i = 0; i < 10000; ++i)
///   bar(i)
/// is converted into
/// for(int i = 0; i < 10000; ++i)
///   bar((double)i);
///
void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge     = IncomingEdge^1;

  // Check incoming value.
  ConstantFP *InitValueVal =
    dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return;

  // Check IV increment. Reject this PN if the increment operation is not
  // an add or the increment value can not be represented by an integer.
  BinaryOperator *Incr =
    dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::use_iterator IncrUse = Incr->use_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->use_end()) return;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->use_end()) return;

  // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
  // only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (Compare == 0)
    Compare = dyn_cast<FCmpInst>(U2);
  if (Compare == 0 || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->use_back()))
    return;

  BranchInst *TheBr = cast<BranchInst>(Compare->use_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop. If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be out
  // of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return;

  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == 0 ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return;  // Unknown comparison.
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }
  // We convert the floating point induction variable to a signed i32 value if
  // we can. This is only safe if the comparison will not overflow in a way
  // that won't be trapped by the integer equivalent operations. Check for this
  // now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value.
    if (InitValue >= ExitValue)
      return;

    uint32_t Range = uint32_t(ExitValue-InitValue);
    // Check for infinite loop, either:
    // while (i <= Exit) or until (i > Exit)
    if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
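    // For instance (schematic values): InitValue = 0, ExitValue = 10 and
    // IncValue = 3 under an NE test give Range = 10 and Leftover = 1, so we
    // bail out: an i32 IV would step 0, 3, 6, 9, 12, ... past the exit value
    // and eventually wrap, which the fp IV would not do.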
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
      return;

  } else {
    // If we have a negative stride, we require the init to be greater than the
    // exit value.
    if (InitValue <= ExitValue)
      return;

    uint32_t Range = uint32_t(InitValue-ExitValue);
    // Check for infinite loop, either:
    // while (i >= Exit) or until (i < Exit)
    if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
      return;
  }
  IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
    BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                              Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakVH to observe whether this happens.
  WeakVH WeakPH = PN;

  // Delete the old floating point exit comparison. The branch starts using the
  // new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);

  // If the FP induction variable still has uses, this is because something else
  // in the loop uses its value. In order to canonicalize the induction
  // variable, we choose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 PN->getParent()->getFirstInsertionPt());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
  }
  Changed = true;
}
void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
  // First step. Check to see if there are any floating-point recurrences.
  // If there are, change them into integer recurrences, permitting analysis by
  // the SCEV routines.
  //
  BasicBlock *Header = L->getHeader();

  SmallVector<WeakVH, 8> PHIs;
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PHIs.push_back(PN);

  for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
    if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
      HandleFloatingPointIV(L, PN);

  // If the loop previously had a floating-point IV, ScalarEvolution
  // may not have been able to compute a trip count. Now that we've done some
  // re-writing, the trip count may be computable.
  if (Changed)
    SE->forgetLoop(L);
}
//===----------------------------------------------------------------------===//
// RewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//

/// RewriteLoopExitValues - Check to see if this loop has a computable
/// loop-invariant execution count. If so, this means that we can compute the
/// final value of any expressions that are recurrent in the loop, and
/// substitute the exit values from the loop into any instructions outside of
/// the loop that use the final values of the current expressions.
///
/// This is mostly redundant with the regular IndVarSimplify activities that
/// happen later, except that it's more powerful in some cases, because it's
/// able to brute-force evaluate arbitrary instructions as long as they have
/// constant operands at the beginning of the loop.
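/// For example (schematically), given
///   for (i = 0; i != n; ++i) sum += 2;
///   use(sum);
/// the recurrence for 'sum' evaluates to '2*n' at loop exit, so 'use(sum)'
/// becomes 'use(2*n)'; if that was the loop's only purpose, the loop dies.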
void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
  // Verify the input to the pass is already in LCSSA form.
  assert(L->isLCSSAForm(*DT));

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  // Find all values that are computed inside the loop, but used outside of it.
  // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
  // the exit blocks of the loop to find them.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitBlocks[i];

    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path, skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // We would like to be able to RAUW single-incoming value PHI nodes. We
    // have to be certain this is safe even when this is an LCSSA PHI node.
    // While the computed exit value is no longer varying in *this* loop, the
    // exit block may be an exit block for an outer containing loop as well,
    // the exit value may be varying in the outer loop, and thus it may still
    // require an LCSSA PHI node. The safe case is when this is a
    // single-predecessor PHI node (LCSSA) and the exit block containing it is
    // part of the enclosing loop, or this is the outer most loop of the nest.
    // In either case the exit value could (at most) be varying in the same
    // loop body as the phi node itself. Thus if it is in turn used outside of
    // an enclosing loop it will only be via a separate LCSSA node.
    bool LCSSASafePhiForRAUW =
      NumPreds == 1 &&
      (!L->getParentLoop() || L->getParentLoop() == LI->getLoopFor(ExitBB));

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      // SCEV only supports integer expressions for now.
      if (!PN->getType()->isIntegerTy() && !PN->getType()->isPointerTy())
        continue;

      // It's necessary to tell ScalarEvolution about this explicitly so that
      // it can walk the def-use list and forget all SCEVs, as it may not be
      // watching the PHI itself. Once the new exit value is in place, there
      // may not be a def-use connection between the loop and every instruction
      // which got a SCEVAddRecExpr for that loop.
      SE->forgetValue(PN);

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not integer or is not defined
        // in the loop, skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop. Evaluate the value it
        // contains when the loop exits, if possible.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (!SE->isLoopInvariant(ExitValue, L) ||
            !isSafeToExpand(ExitValue, *SE))
          continue;

        // Computing the value outside of the loop brings no benefit if:
        //  - it is definitely used inside the loop in a way which can not be
        //    optimized away.
        //  - no use outside of the loop can take advantage of hoisting the
        //    computation out of the loop
        if (ExitValue->getSCEVType()>=scMulExpr) {
          unsigned NumHardInternalUses = 0;
          unsigned NumSoftExternalUses = 0;
          unsigned NumUses = 0;
          for (Value::use_iterator IB=Inst->use_begin(), IE=Inst->use_end();
               IB!=IE && NumUses<=6 ; ++IB) {
            Instruction *UseInstr = cast<Instruction>(*IB);
            unsigned Opc = UseInstr->getOpcode();
            NumUses++;
            if (L->contains(UseInstr)) {
              if (Opc == Instruction::Call || Opc == Instruction::Ret)
                NumHardInternalUses++;
            } else {
              if (Opc == Instruction::PHI) {
                // Do not count the Phi as a use. LCSSA may have inserted
                // plenty of trivial ones.
                NumUses--;
                for (Value::use_iterator PB=UseInstr->use_begin(),
                                         PE=UseInstr->use_end();
                     PB!=PE && NumUses<=6 ; ++PB, ++NumUses) {
                  unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
                  if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret)
                    NumSoftExternalUses++;
                }
                continue;
              }
              if (Opc != Instruction::Call && Opc != Instruction::Ret)
                NumSoftExternalUses++;
            }
          }
          if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses)
            continue;
        }

        Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);

        DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n'
                     << "  LoopVal = " << *Inst << "\n");

        if (!isValidRewrite(Inst, ExitVal)) {
          DeadInsts.push_back(ExitVal);
          continue;
        }
        Changed = true;
        ++NumReplaced;

        PN->setIncomingValue(i, ExitVal);

        // If this instruction is dead now, delete it. Don't do it now to avoid
        // invalidating iterators.
        if (isInstructionTriviallyDead(Inst, TLI))
          DeadInsts.push_back(Inst);

        // If we determined that this PHI is safe to replace even if an LCSSA
        // PHI node is required.
        if (LCSSASafePhiForRAUW) {
          PN->replaceAllUsesWith(ExitVal);
          PN->eraseFromParent();
        }
      }

      // If we were unable to completely replace the PHI node, clone the PHI
      // and delete the original one. This lets IVUsers and any other maps
      // purge the original user from their records.
      if (!LCSSASafePhiForRAUW) {
        PHINode *NewPN = cast<PHINode>(PN->clone());
        NewPN->takeName(PN);
        NewPN->insertBefore(PN);
        PN->replaceAllUsesWith(NewPN);
        PN->eraseFromParent();
      }
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
}
//===----------------------------------------------------------------------===//
//  IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//

namespace {
  // Collect information about induction variables that are used by sign/zero
  // extend operations. This information is recorded by CollectExtend and
  // provides the input to WidenIV.
  struct WideIVInfo {
    PHINode *NarrowIV;
    Type *WidestNativeType; // Widest integer type created [sz]ext
    bool IsSigned;          // Was an sext user seen before a zext?

    WideIVInfo() : NarrowIV(0), WidestNativeType(0), IsSigned(false) {}
  };
}
/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
/// the final width of the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
                        const DataLayout *TD) {
  bool IsSigned = Cast->getOpcode() == Instruction::SExt;
  if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
    return;

  Type *Ty = Cast->getType();
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (TD && !TD->isLegalInteger(Width))
    return;

  if (!WI.WidestNativeType) {
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
    WI.IsSigned = IsSigned;
    return;
  }

  // We extend the IV to satisfy the sign of its first user, arbitrarily.
  if (WI.IsSigned != IsSigned)
    return;

  if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
}
namespace {

/// NarrowIVDefUse - Record a link in the Narrow IV def-use chain along with the
/// WideIV that computes the same value as the Narrow IV def. This avoids
/// caching Use* pointers.
struct NarrowIVDefUse {
  Instruction *NarrowDef;
  Instruction *NarrowUse;
  Instruction *WideDef;

  NarrowIVDefUse(): NarrowDef(0), NarrowUse(0), WideDef(0) {}

  NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD):
    NarrowDef(ND), NarrowUse(NU), WideDef(WD) {}
};
/// WidenIV - The goal of this transform is to remove sign and zero extends
/// without creating any new induction variables. To do this, it creates a new
/// phi of the wider type and redirects all users, either removing extends or
/// inserting truncs whenever we stop propagating the type.
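/// For example (schematic IR, names invented), widening turns
///   %iv = phi i32 [ 0, %ph ], [ %iv.next, %latch ]
///   %iv.next = add nsw i32 %iv, 1
///   %wide = sext i32 %iv.next to i64
/// into a single i64 recurrence
///   %iv.wide = phi i64 [ 0, %ph ], [ %iv.wide.next, %latch ]
///   %iv.wide.next = add nsw i64 %iv.wide, 1
/// with the sext gone.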
class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;
  bool IsSigned;

  // Context
  LoopInfo        *LI;
  Loop            *L;
  ScalarEvolution *SE;
  DominatorTree   *DT;

  // Result
  PHINode *WidePhi;
  Instruction *WideInc;
  const SCEV *WideIncExpr;
  SmallVectorImpl<WeakVH> &DeadInsts;

  SmallPtrSet<Instruction*,16> Widened;
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;

public:
  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo,
          ScalarEvolution *SEv, DominatorTree *DTree,
          SmallVectorImpl<WeakVH> &DI) :
    OrigPhi(WI.NarrowIV),
    WideType(WI.WidestNativeType),
    IsSigned(WI.IsSigned),
    LI(LInfo),
    L(LI->getLoopFor(OrigPhi->getParent())),
    SE(SEv),
    DT(DTree),
    WidePhi(0),
    WideInc(0),
    WideIncExpr(0),
    DeadInsts(DI) {
    assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  }

  PHINode *CreateWideIV(SCEVExpander &Rewriter);

protected:
  Value *getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
                   Instruction *Use);

  Instruction *CloneIVUser(NarrowIVDefUse DU);

  const SCEVAddRecExpr *GetWideRecurrence(Instruction *NarrowUse);

  const SCEVAddRecExpr* GetExtendedOperandRecurrence(NarrowIVDefUse DU);

  Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
};
} // anonymous namespace
/// isLoopInvariant - Perform a quick domtree based check for loop invariance
/// assuming that V is used within the loop. LoopInfo::isLoopInvariant() seems
/// gratuitous for this purpose.
static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) {
  Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  return DT->properlyDominates(Inst->getParent(), L->getHeader());
}
Value *WidenIV::getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use) {
  // Set the debug location and conservative insertion point.
  IRBuilder<> Builder(Use);
  // Hoist the insertion point into loop preheaders as far as possible.
  for (const Loop *L = LI->getLoopFor(Use->getParent());
       L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT);
       L = L->getParentLoop())
    Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());

  return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
                    Builder.CreateZExt(NarrowOper, WideType);
}
/// CloneIVUser - Instantiate a wide operation to replace a narrow
/// operation. This only needs to handle operations that can evaluate to
/// SCEVAddRec. It can safely return 0 for any operation we decide not to clone.
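/// For example (schematically), a narrow use 'add i32 %narrowdef, %x' is
/// cloned as 'add i64 %widedef, %x.ext', where '%x.ext' is a [sz]ext of the
/// other operand that is expected to fold away or hoist out of the loop.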
Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
  unsigned Opcode = DU.NarrowUse->getOpcode();
  switch (Opcode) {
  default:
    return 0;
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    DEBUG(dbgs() << "Cloning IVUser: " << *DU.NarrowUse << "\n");

    // Replace NarrowDef operands with WideDef. Otherwise, we don't know
    // anything about the narrow operand yet so must insert a [sz]ext. It is
    // probably loop invariant and will be folded or hoisted. If it actually
    // comes from a widened IV, it should be removed during a future call to
    // WidenIVUse.
    Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef) ? DU.WideDef :
      getExtend(DU.NarrowUse->getOperand(0), WideType, IsSigned, DU.NarrowUse);
    Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef) ? DU.WideDef :
      getExtend(DU.NarrowUse->getOperand(1), WideType, IsSigned, DU.NarrowUse);

    BinaryOperator *NarrowBO = cast<BinaryOperator>(DU.NarrowUse);
    BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(),
                                                    LHS, RHS,
                                                    NarrowBO->getName());
    IRBuilder<> Builder(DU.NarrowUse);
    Builder.Insert(WideBO);
    if (const OverflowingBinaryOperator *OBO =
        dyn_cast<OverflowingBinaryOperator>(NarrowBO)) {
      if (OBO->hasNoUnsignedWrap()) WideBO->setHasNoUnsignedWrap();
      if (OBO->hasNoSignedWrap()) WideBO->setHasNoSignedWrap();
    }
    return WideBO;
  }
}
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return it.
const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  if (DU.NarrowUse->getOpcode() != Instruction::Add)
    return 0;

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  const SCEV *ExtendOperExpr = 0;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  if (IsSigned && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (!IsSigned && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return 0;

  // When creating this AddExpr, don't apply the current operation's NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(
    SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr));

  if (!AddRec || AddRec->getLoop() != L)
    return 0;
  return AddRec;
}
/// GetWideRecurrence - Is this instruction potentially interesting from
/// IVUsers' perspective after widening its type? In other words, can the
/// extend be safely hoisted out of the loop with SCEV reducing the value to a
/// recurrence on the same loop? If so, return the sign or zero extended
/// recurrence. Otherwise return NULL.
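/// For example, sign extending a narrow use whose SCEV is the i32 recurrence
/// {0,+,1}<nsw> typically folds to the i64 recurrence {0,+,1} on the same
/// loop, making the use interesting to widen.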
const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
  if (!SE->isSCEVable(NarrowUse->getType()))
    return 0;

  const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType())
      >= SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return 0;
  }

  const SCEV *WideExpr = IsSigned ?
    SE->getSignExtendExpr(NarrowExpr, WideType) :
    SE->getZeroExtendExpr(NarrowExpr, WideType);
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return 0;
  return AddRec;
}
/// This IV user cannot be widened. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT) {
  DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef
        << " for user " << *DU.NarrowUse << "\n");
  IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
  Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
}
/// WidenIVUse - Determine whether an individual user of the narrow IV can be
/// widened. If so, return the wide clone of the user.
Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT);
      else {
        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.push_back(UsePhi);
        DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
              << " to " << *WidePhi << "\n");
      }
      return 0;
    }
  }
  // Our raison d'etre! Eliminate sign and zero extension.
  if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
              << " not wide enough to subsume " << *DU.NarrowUse << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
            << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.push_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return 0;
  }

  // Does this user itself evaluate to a recurrence after widening?
  const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(DU.NarrowUse);
  if (!WideAddRec)
    WideAddRec = GetExtendedOperandRecurrence(DU);
  if (!WideAddRec) {
    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT);
    return 0;
  }
  // Assume block terminators cannot evaluate to a recurrence. We can't insert
  // a Trunc after a terminator if there happens to be a critical edge.
  assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
         "SCEV is not expected to evaluate a block terminator");

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = 0;
  if (WideAddRec == WideIncExpr
      && Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = CloneIVUser(DU);
    if (!WideUse)
      return 0;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec != SE->getSCEV(WideUse)) {
    DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
          << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n");
    DeadInsts.push_back(WideUse);
    return 0;
  }

  // Returning WideUse pushes it on the worklist.
  return WideUse;
}
/// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers.
///
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  for (Value::use_iterator UI = NarrowDef->use_begin(),
         UE = NarrowDef->use_end(); UI != UE; ++UI) {
    Instruction *NarrowUse = cast<Instruction>(*UI);

    // Handle data flow merges and bizarre phi cycles.
    if (!Widened.insert(NarrowUse))
      continue;

    NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUse, WideDef));
  }
}
/// CreateWideIV - Process a single induction variable. First use the
/// SCEVExpander to create a wide induction variable that evaluates to the same
/// recurrence as the original narrow IV. Then use a worklist to forward
/// traverse the narrow IV's def-use chain. After WidenIVUse has processed all
/// interesting IV users, the narrow IV will be isolated for removal by
/// DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must avoid
/// invalidating SCEV expressions.
///
PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return NULL;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = IsSigned ?
    SE->getSignExtendExpr(AddRec, WideType) :
    SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow?
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return NULL;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any post-loop
  // operands, so they must dominate the loop header.
  assert(SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
         SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader())
         && "Loop header phi recurrence inputs do not dominate the loop");

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  // of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = L->getHeader()->begin();
  WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt));

  // Remembering the WideIV increment generated by SCEVExpander allows
  // WidenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call to
  // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
    WideInc =
      cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
    WideIncExpr = SE->getSCEV(WideInc);
  }

  DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  ++NumWidened;

  // Traverse the def-use chain using a worklist starting at the original IV.
  assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");

  Widened.insert(OrigPhi);
  pushNarrowIVUsers(OrigPhi, WidePhi);

  while (!NarrowIVUsers.empty()) {
    NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();

    // Process a def-use edge. This may replace the use, so don't hold a
    // use_iterator across it.
    Instruction *WideUse = WidenIVUse(DU, Rewriter);

    // Follow all def-use edges from the previous narrow use.
    if (WideUse)
      pushNarrowIVUsers(DU.NarrowUse, WideUse);

    // WidenIVUse may have removed the def-use edge.
    if (DU.NarrowDef->use_empty())
      DeadInsts.push_back(DU.NarrowDef);
  }
  return WidePhi;
}
//===----------------------------------------------------------------------===//
//  Live IV Reduction - Minimize IVs live across the loop.
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
//  Simplification of IV users based on SCEV evaluation.
//===----------------------------------------------------------------------===//
namespace {
  class IndVarSimplifyVisitor : public IVVisitor {
    ScalarEvolution *SE;
    const DataLayout *TD;
    PHINode *IVPhi;

  public:
    WideIVInfo WI;

    IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
                          const DataLayout *TData, const DominatorTree *DTree):
      SE(SCEV), TD(TData), IVPhi(IV) {
      DT = DTree;
      WI.NarrowIV = IVPhi;
      if (ReduceLiveIVs)
        setSplitOverflowIntrinsics();
    }

    // Implement the interface used by simplifyUsersOfIV.
    virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, TD); }
  };
}
/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
/// themselves be candidates for simplification.
///
/// Sign/Zero extend elimination is interleaved with IV simplification.
///
void IndVarSimplify::SimplifyAndExtend(Loop *L,
                                       SCEVExpander &Rewriter,
                                       LPPassManager &LPM) {
  SmallVector<WideIVInfo, 8> WideIVs;

  SmallVector<PHINode*, 8> LoopPhis;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    LoopPhis.push_back(cast<PHINode>(I));
  }
  // Each round of simplification iterates through the SimplifyIVUsers worklist
  // for all current phis, then determines whether any IVs can be
  // widened. Widening adds new phis to LoopPhis, inducing another round of
  // simplification on the wide IVs.
  while (!LoopPhis.empty()) {
    // Evaluate as many IV expressions as possible before widening any IVs. This
    // forces SCEV to set no-wrap flags before evaluating sign/zero
    // extension. The first time SCEV attempts to normalize sign/zero extension,
    // the result becomes final. So for the most predictable results, we delay
    // evaluation of sign/zero extension until needed, and avoid running
    // other SCEV based analysis prior to SimplifyAndExtend.
    do {
      PHINode *CurrIV = LoopPhis.pop_back_val();

      // Information about sign/zero extensions of CurrIV.
      IndVarSimplifyVisitor Visitor(CurrIV, SE, TD, DT);

      Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);

      if (Visitor.WI.WidestNativeType) {
        WideIVs.push_back(Visitor.WI);
      }
    } while(!LoopPhis.empty());

    for (; !WideIVs.empty(); WideIVs.pop_back()) {
      WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts);
      if (PHINode *WidePhi = Widener.CreateWideIV(Rewriter)) {
        Changed = true;
        LoopPhis.push_back(WidePhi);
      }
    }
  }
}
//===----------------------------------------------------------------------===//
//  LinearFunctionTestReplace and its kin. Rewrite the loop exit condition.
//===----------------------------------------------------------------------===//

/// Check for expressions that ScalarEvolution generates to compute
/// BackedgeTakenInfo. If these expressions have not been reduced, then
/// expanding them may incur additional cost (albeit in the loop preheader).
static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
                                SmallPtrSet<const SCEV*, 8> &Processed,
                                ScalarEvolution *SE) {
  if (!Processed.insert(S))
    return false;

  // If the backedge-taken count is a UDiv, it's very likely a UDiv that
  // ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
  // precise expression, rather than a UDiv from the user's code. If we can't
  // find a UDiv in the code with some simple searching, assume the former and
  // forego rewriting the loop.
  if (isa<SCEVUDivExpr>(S)) {
    ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
    if (!OrigCond) return true;
    const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
    R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
    if (R != S) {
      const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
      L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
      if (L != S)
        return true;
    }
  }

  // Recurse past add expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, BI, Processed, SE))
        return true;
    }
    return false;
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}
/// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
/// count expression can be safely and cheaply expanded into an instruction
/// sequence that can be used by LinearFunctionTestReplace.
///
/// TODO: This fails for pointer-type loop counters with greater than one byte
/// strides, consequently preventing LFTR from running. For the purpose of LFTR
/// we could skip this check in the case that the LFTR loop counter (chosen by
/// FindLoopCounter) is also pointer type. Instead, we could directly convert
/// the loop test to an inequality test by checking the target data's alignment
/// of element types (given that the initial pointer value originates from or is
/// used by an ABI constrained operation, as opposed to inttoptr/ptrtoint).
/// However, we don't yet have a strong motivation for converting loop tests
/// into inequality tests.
static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
      BackedgeTakenCount->isZero())
    return false;

  if (!L->getExitingBlock())
    return false;

  // Can't rewrite non-branch yet.
  BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
  if (!BI)
    return false;

  SmallPtrSet<const SCEV*, 8> Processed;
  if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE))
    return false;

  return true;
}
/// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop
/// invariant value to the phi.
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
  Instruction *IncI = dyn_cast<Instruction>(IncV);
  if (!IncI)
    return 0;

  switch (IncI->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
    break;
  case Instruction::GetElementPtr:
    // An IV counter must preserve its type.
    if (IncI->getNumOperands() == 2)
      break;
  default:
    return 0;
  }

  PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (isLoopInvariant(IncI->getOperand(1), L, DT))
      return Phi;
    return 0;
  }
  if (IncI->getOpcode() == Instruction::GetElementPtr)
    return 0;

  // Allow add/sub to be commuted.
  Phi = dyn_cast<PHINode>(IncI->getOperand(1));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (isLoopInvariant(IncI->getOperand(0), L, DT))
      return Phi;
  }
  return 0;
}
/// Return the compare guarding the loop latch, or NULL for unrecognized tests.
static ICmpInst *getLoopTest(Loop *L) {
  assert(L->getExitingBlock() && "expected loop exit");

  BasicBlock *LatchBlock = L->getLoopLatch();
  // Don't bother with LFTR if the loop is not properly simplified.
  if (!LatchBlock)
    return 0;

  BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
  assert(BI && "expected exit branch");

  return dyn_cast<ICmpInst>(BI->getCondition());
}
/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show
/// that the current exit test is already sufficiently canonical.
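/// For example (schematic IR, names invented), a latch test of the form
///   %exitcond = icmp ne i32 %iv.next, %limit
/// with a loop-invariant %limit and a simple counter %iv is already canonical,
/// so LFTR is not needed.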
static bool needsLFTR(Loop *L, DominatorTree *DT) {
  // Do LFTR to simplify the exit condition to an ICMP.
  ICmpInst *Cond = getLoopTest(L);
  if (!Cond)
    return true;

  // Do LFTR to simplify the exit ICMP to EQ/NE
  ICmpInst::Predicate Pred = Cond->getPredicate();
  if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
    return true;

  // Look for a loop invariant RHS
  Value *LHS = Cond->getOperand(0);
  Value *RHS = Cond->getOperand(1);
  if (!isLoopInvariant(RHS, L, DT)) {
    if (!isLoopInvariant(LHS, L, DT))
      return true;
    std::swap(LHS, RHS);
  }
  // Look for a simple IV counter LHS
  PHINode *Phi = dyn_cast<PHINode>(LHS);
  if (!Phi)
    Phi = getLoopPhiForCounter(LHS, L, DT);

  if (!Phi)
    return true;

  // Do LFTR if PHI node is defined in the loop, but is *not* a counter.
  int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
  if (Idx < 0)
    return true;

  // Do LFTR if the exit condition's IV is *not* a simple counter.
  Value *IncV = Phi->getIncomingValue(Idx);
  return Phi != getLoopPhiForCounter(IncV, L, DT);
}
/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
/// down to checking that all operands are constant and listing instructions
/// that may hide undef.
static bool hasConcreteDefImpl(Value *V, SmallPtrSet<Value*, 8> &Visited,
                               unsigned Depth) {
  if (isa<Constant>(V))
    return !isa<UndefValue>(V);

  if (Depth >= 6)
    return false;

  // Conservatively handle non-constant non-instructions. For example, Arguments
  // are unknown.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // Load and return values may be undef.
  if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
    return false;

  // Optimistically handle other instructions.
  for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
    if (!Visited.insert(*OI))
      continue;
    if (!hasConcreteDefImpl(*OI, Visited, Depth+1))
      return false;
  }
  return true;
}

/// Return true if the given value is concrete. We must prove that undef can
/// never reach it.
///
/// TODO: If we decide that this is a good approach to checking for undef, we
/// may factor it into a common location.
static bool hasConcreteDef(Value *V) {
  SmallPtrSet<Value*, 8> Visited;
  Visited.insert(V);
  return hasConcreteDefImpl(V, Visited, 0);
}
/// AlmostDeadIV - Return true if this IV has any uses other than the (soon to
/// be rewritten) loop exit test.
static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
  int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
  Value *IncV = Phi->getIncomingValue(LatchIdx);

  for (Value::use_iterator UI = Phi->use_begin(), UE = Phi->use_end();
       UI != UE; ++UI) {
    if (*UI != Cond && *UI != IncV) return false;
  }

  for (Value::use_iterator UI = IncV->use_begin(), UE = IncV->use_end();
       UI != UE; ++UI) {
    if (*UI != Cond && *UI != Phi) return false;
  }
  return true;
}
/// FindLoopCounter - Find an affine IV in canonical form.
///
/// BECount may be an i8* pointer type. The pointer difference is already
/// a valid count without scaling the address stride, so it remains a pointer
/// expression as far as SCEV is concerned.
///
/// Currently only valid for LFTR. See the comments on hasConcreteDef below.
///
/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
///
/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
/// This is difficult in general for SCEV because of potential overflow. But we
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
                ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
  uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());

  Value *Cond =
    cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();

  // Loop over all of the PHI nodes, looking for a simple counter.
  PHINode *BestPhi = 0;
  const SCEV *BestInit = 0;
  BasicBlock *LatchBlock = L->getLoopLatch();
  assert(LatchBlock && "needsLFTR should guarantee a loop latch");

  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!SE->isSCEVable(Phi->getType()))
      continue;

    // Avoid comparing an integer IV against a pointer Limit.
    if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
      continue;

    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
    if (!AR || AR->getLoop() != L || !AR->isAffine())
      continue;

    // AR may be a pointer type, while BECount is an integer type.
    // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
    // AR may not be a narrower type, or we may never exit.
    uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
    if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
      continue;

    const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
    if (!Step || !Step->isOne())
      continue;

    int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
    Value *IncV = Phi->getIncomingValue(LatchIdx);
    if (getLoopPhiForCounter(IncV, L, DT) != Phi)
      continue;

    // Avoid reusing a potentially undef value to compute other values that may
    // have originally had a concrete definition.
    if (!hasConcreteDef(Phi)) {
      // We explicitly allow unknown phis as long as they are already used by
      // the loop test. In this case we assume that performing LFTR could not
      // increase the number of undef users.
      if (ICmpInst *Cond = getLoopTest(L)) {
        if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT)
            && Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) {
          continue;
        }
      }
    }
    const SCEV *Init = AR->getStart();

    if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
      // Don't force a live loop counter if another IV can be used.
      if (AlmostDeadIV(Phi, LatchBlock, Cond))
        continue;

      // Prefer to count-from-zero. This is a more "canonical" counter form. It
      // also prefers integer to pointer IVs.
      if (BestInit->isZero() != Init->isZero()) {
        if (BestInit->isZero())
          continue;
      }
      // If two IVs both count from zero or both count from nonzero then the
      // narrower is likely a dead phi that has been widened. Use the wider phi
      // to allow the other to be eliminated.
      else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
        continue;
    }
    BestPhi = Phi;
    BestInit = Init;
  }
  return BestPhi;
}
/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
/// holds the RHS of the new loop test.
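/// For example (schematic values), for a unit-stride counter starting at 5
/// with IVCount = 10, the generated limit is 5 + 10 = 15, producing an exit
/// test of the form 'icmp ne %iv.next, 15'.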
1528 static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
1529 SCEVExpander &Rewriter, ScalarEvolution *SE) {
1530 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
1531 assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
1532 const SCEV *IVInit = AR->getStart();
1534 // IVInit may be a pointer while IVCount is an integer when FindLoopCounter
1535 // finds a valid pointer IV. Sign extend BECount in order to materialize a
1536 // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
1537 // the existing GEPs whenever possible.
1538 if (IndVar->getType()->isPointerTy()
1539 && !IVCount->getType()->isPointerTy()) {
1541 // IVOffset will be the new GEP offset that is interpreted by GEP as a
1542 // signed value. IVCount on the other hand represents the loop trip count,
1543 // which is an unsigned value. FindLoopCounter only allows induction
1544 // variables that have a positive unit stride of one. This means we don't
1545 // have to handle the case of negative offsets (yet) and just need to zero
1547 Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
1548 const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy);
1550 // Expand the code for the iteration count.
1551 assert(SE->isLoopInvariant(IVOffset, L) &&
1552 "Computed iteration count is not loop invariant!");
1553 BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
1554 Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);

    Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
    assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
    // We could handle pointer IVs other than i8*, but we need to compensate for
    // gep index scaling. See canExpandBackedgeTakenCount comments.
    assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
                             cast<PointerType>(GEPBase->getType())
                               ->getElementType())->isOne()
           && "unit stride pointer IV must be i8*");

    IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
    return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
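
    // For example (illustrative IR; %base and %n.zext are hypothetical
    // names), the limit returned above would look like:
    //   %lftr.limit = getelementptr i8* %base, i64 %n.zext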
  }
  else {
    // In any other case, convert both IVInit and IVCount to integers before
    // comparing. This may result in SCEV expansion of pointers, but in practice
    // SCEV will fold the pointer arithmetic away as such:
    // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
    //
    // Valid Cases: (1) both integers is most common; (2) both may be pointers
    // for simple memset-style loops.
    //
    // IVInit integer and IVCount pointer would only occur if a canonical IV
    // were generated on top of case #2, which is not expected.
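    //
    // A sketch of case (2) in hypothetical C source, a memset-style loop in
    // which both the IV and its limit are pointers:
    //   for (char *P = Begin; P != End; ++P)
    //     *P = 0;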

    const SCEV *IVLimit = 0;
    // For unit stride, IVCount = Start + BECount with 2's complement overflow.
    // For non-zero Start, compute IVLimit here.
    if (AR->getStart()->isZero())
      IVLimit = IVCount;
    else {
      assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
      const SCEV *IVInit = AR->getStart();

      // For integer IVs, truncate the IV before computing IVInit + BECount.
      if (SE->getTypeSizeInBits(IVInit->getType())
          > SE->getTypeSizeInBits(IVCount->getType()))
        IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());

      IVLimit = SE->getAddExpr(IVInit, IVCount);
    }
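
    // Worked example (illustrative): for an IV starting at 16 with a
    // post-incremented IVCount of 8, IVLimit = 16 + 8 = 24; the rewritten
    // exit test fires when the IV reaches 24.
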
    // Expand the code for the iteration count.
    BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
    IRBuilder<> Builder(BI);
    assert(SE->isLoopInvariant(IVLimit, L) &&
           "Computed iteration count is not loop invariant!");
    // Ensure that we generate the same type as IndVar, or a smaller integer
    // type. In the presence of null pointer values, we have an integer type
    // SCEV expression (IVInit) for a pointer type IV value (IndVar).
    Type *LimitTy = IVCount->getType()->isPointerTy() ?
      IndVar->getType() : IVCount->getType();
    return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
  }
}

/// LinearFunctionTestReplace - This method rewrites the exit condition of the
/// loop to be a canonical != comparison against the incremented loop induction
/// variable. It can rewrite the exit test of any loop for which SCEV can
/// determine a loop-invariant trip count, which is a much broader range than
/// just linear tests.
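///
/// For example (illustrative IR, not drawn from a test case), an exit test
/// such as:
///   %cmp = icmp slt i32 %iv, %n
/// becomes a comparison of the incremented IV against a precomputed limit:
///   %exitcond = icmp ne i32 %iv.next, %lftr.limit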
Value *IndVarSimplify::
LinearFunctionTestReplace(Loop *L,
                          const SCEV *BackedgeTakenCount,
                          PHINode *IndVar,
                          SCEVExpander &Rewriter) {
  assert(canExpandBackedgeTakenCount(L, SE) && "precondition");

  // Initialize CmpIndVar and IVCount to their preincremented values.
  Value *CmpIndVar = IndVar;
  const SCEV *IVCount = BackedgeTakenCount;

  // If the exiting block is the same as the backedge block, we prefer to
  // compare against the post-incremented value, otherwise we must compare
  // against the preincremented value.
  if (L->getExitingBlock() == L->getLoopLatch()) {
    // Add one to the "backedge-taken" count to get the trip count.
    // This addition may overflow, which is valid as long as the comparison is
    // truncated to BackedgeTakenCount->getType().
    IVCount = SE->getAddExpr(BackedgeTakenCount,
                             SE->getConstant(BackedgeTakenCount->getType(), 1));
    // The BackedgeTaken expression contains the number of times that the
    // backedge branches to the loop header. This is one less than the
    // number of times the loop executes, so use the incremented indvar.
    CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
  }
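
  // E.g. (illustrative): for a loop whose body executes 10 times, the
  // backedge-taken count is 9; when exiting from the latch we compare the
  // post-incremented IV against the trip count of 10 computed above.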

  Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
  assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
         && "genLoopLimit missed a cast");

  // Insert a new icmp_ne or icmp_eq instruction before the branch.
  BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
  ICmpInst::Predicate P;
  if (L->contains(BI->getSuccessor(0)))
    P = ICmpInst::ICMP_NE;
  else
    P = ICmpInst::ICMP_EQ;

  DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
               << "      LHS:" << *CmpIndVar << '\n'
               << "       op:\t"
               << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
               << "      RHS:\t" << *ExitCnt << "\n"
               << "  IVCount:\t" << *IVCount << "\n");

  IRBuilder<> Builder(BI);

  // LFTR can ignore IV overflow and truncate to the width of
  // BECount. This avoids materializing the add(zext(add)) expression.
  unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType());
  unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType());
  if (CmpIndVarSize > ExitCntSize) {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
    const SCEV *ARStart = AR->getStart();
    const SCEV *ARStep = AR->getStepRecurrence(*SE);
    // For constant IVCount, avoid truncation.
    if (isa<SCEVConstant>(ARStart) && isa<SCEVConstant>(IVCount)) {
      const APInt &Start = cast<SCEVConstant>(ARStart)->getValue()->getValue();
      APInt Count = cast<SCEVConstant>(IVCount)->getValue()->getValue();
      // Note that the post-inc value of BackedgeTakenCount may have overflowed
      // above such that IVCount is now zero.
      if (IVCount != BackedgeTakenCount && Count == 0) {
        Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize);
        ++Count;
      }
      else
        Count = Count.zext(CmpIndVarSize);
      APInt NewLimit;
      if (cast<SCEVConstant>(ARStep)->getValue()->isNegative())
        NewLimit = Start - Count;
      else
        NewLimit = Start + Count;
      ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit);

      DEBUG(dbgs() << "  Widen RHS:\t" << *ExitCnt << "\n");
    } else {
      CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
                                      "lftr.wideiv");
    }
  }
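
  // Sketch of the two paths above (illustrative, assuming an i64 IV and an
  // i32 exit count): with a constant start and count, the limit is widened
  // into a single i64 constant; otherwise the IV itself is narrowed, e.g.
  //   %lftr.wideiv = trunc i64 %iv.next to i32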

  Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
  Value *OrigCond = BI->getCondition();
  // It's tempting to use replaceAllUsesWith here to fully replace the old
  // comparison, but that's not immediately safe, since users of the old
  // comparison may not be dominated by the new comparison. Instead, just
  // update the branch to use the new comparison; in the common case this
  // will make the old comparison dead.
  BI->setCondition(Cond);
  DeadInsts.push_back(OrigCond);

  ++NumLFTR;
  Changed = true;
  return Cond;
}

//===----------------------------------------------------------------------===//
//  SinkUnusedInvariants. A late subpass to clean up loop preheaders.
//===----------------------------------------------------------------------===//

/// If there's a single exit block, sink any loop-invariant values that
/// were defined in the preheader but not used inside the loop into the
/// exit block to reduce register pressure in the loop.
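///
/// For example (hypothetical names): if the preheader computes
/// '%inv = add i32 %a, %b' and %inv is used only by instructions in the
/// exit block, %inv can be moved into the exit block.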
void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
  BasicBlock *ExitBlock = L->getExitBlock();
  if (!ExitBlock) return;

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) return;

  Instruction *InsertPt = ExitBlock->getFirstInsertionPt();
  BasicBlock::iterator I = Preheader->getTerminator();
  while (I != Preheader->begin()) {
    --I;

    // New instructions were inserted at the end of the preheader.
    if (isa<PHINode>(I))
      break;

    // Don't move instructions which might have side effects, since the side
    // effects need to complete before instructions inside the loop. Also don't
    // move instructions which might read memory, since the loop may modify
    // memory. Note that it's okay if the instruction might have undefined
    // behavior: LoopSimplify guarantees that the preheader dominates the exit
    // block.
    if (I->mayHaveSideEffects() || I->mayReadFromMemory())
      continue;

    // Skip debug info intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip landingpad instructions.
    if (isa<LandingPadInst>(I))
      continue;

    // Don't sink allocas: we never want to sink static allocas out of the
    // entry block, and correctly sinking dynamic allocas requires
    // checks for stacksave/stackrestore intrinsics.
    // FIXME: Refactor this check somehow?
    if (isa<AllocaInst>(I))
      continue;

    // Determine if there is a use in or before the loop (direct or
    // otherwise).
    bool UsedInLoop = false;
    for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
         UI != UE; ++UI) {
      User *U = *UI;
      BasicBlock *UseBB = cast<Instruction>(U)->getParent();
      if (PHINode *P = dyn_cast<PHINode>(U)) {
        unsigned i =
          PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
        UseBB = P->getIncomingBlock(i);
      }
      if (UseBB == Preheader || L->contains(UseBB)) {
        UsedInLoop = true;
        break;
      }
    }

    // If there is, the def must remain in the preheader.
    if (UsedInLoop)
      continue;

    // Otherwise, sink it to the exit block.
    Instruction *ToMove = I;
    bool Done = false;

    if (I != Preheader->begin()) {
      // Skip debug info intrinsics.
      do {
        --I;
      } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());

      if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
        Done = true;
    } else {
      Done = true;
    }

    ToMove->moveBefore(InsertPt);
    if (Done) break;
    InsertPt = ToMove;
  }
}

//===----------------------------------------------------------------------===//
//  IndVarSimplify driver. Manage several subpasses of IV simplification.
//===----------------------------------------------------------------------===//

bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipOptnoneFunction(L))
    return false;

  // If LoopSimplify form is not available, stay out of trouble. Some notes:
  //  - LSR currently only supports LoopSimplify-form loops. Indvars'
  //    canonicalization can be a pessimization without LSR to "clean up"
  //    afterwards.
  //  - We depend on having a preheader; in particular,
  //    Loop::getCanonicalInductionVariable only supports loops with preheaders,
  //    and we're in trouble if we can't find the induction variable even when
  //    we've manually inserted one.
  if (!L->isLoopSimplifyForm())
    return false;

  LI = &getAnalysis<LoopInfo>();
  SE = &getAnalysis<ScalarEvolution>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = getAnalysisIfAvailable<TargetLibraryInfo>();

  DeadInsts.clear();
  Changed = false;

  // If there are any floating-point recurrences, attempt to
  // transform them to use integer recurrences.
  RewriteNonIntegerIVs(L);

  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);

  // Create a rewriter object which we'll use to transform the code with.
  SCEVExpander Rewriter(*SE, "indvars");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif

  // Eliminate redundant IV users.
  //
  // Simplification works best when run before other consumers of SCEV. We
  // attempt to avoid evaluating SCEVs for sign/zero extend operations until
  // other expressions involving loop IVs have been evaluated. This helps SCEV
  // set no-wrap flags before normalizing sign/zero extension.
  Rewriter.disableCanonicalMode();
  SimplifyAndExtend(L, Rewriter, LPM);

  // Check to see if this loop has a computable loop-invariant execution count.
  // If so, this means that we can compute the final value of any expressions
  // that are recurrent in the loop, and substitute the exit values from the
  // loop into any instructions outside of the loop that use the final values
  // of the current expressions.
  if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    RewriteLoopExitValues(L, Rewriter);

  // Eliminate redundant IV cycles.
  NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);

  // If we have a trip count expression, rewrite the loop's exit condition
  // using it. We can currently only handle loops with a single exit.
  if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
    PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
    if (IndVar) {
      // Check preconditions for proper SCEVExpander operation. SCEV does not
      // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
      // pass that uses the SCEVExpander must do it. This does not work well for
      // loop passes because SCEVExpander makes assumptions about all loops,
      // while LoopPassManager only forces the current loop to be simplified.
      //
      // FIXME: SCEV expansion has no way to bail out, so the caller must
      // explicitly check any assumptions made by SCEV. Brittle.
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
      if (!AR || AR->getLoop()->getLoopPreheader())
        (void)LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar,
                                        Rewriter);
    }
  }

  // Clear the rewriter cache, because values that are in the rewriter's cache
  // can be deleted in the loop below, causing the AssertingVH in the cache to
  // trigger.
  Rewriter.clear();

  // Now that we're done iterating through lists, clean up any instructions
  // which are now dead.
  while (!DeadInsts.empty())
    if (Instruction *Inst =
          dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);

  // The Rewriter may not be used from this point on.

  // Loop-invariant instructions in the preheader that aren't used in the
  // loop may be sunk below the loop to reduce register pressure.
  SinkUnusedInvariants(L);

  // Clean up dead instructions.
  Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
  // Check a post-condition.
  assert(L->isLCSSAForm(*DT) &&
         "Indvars did not leave the loop in lcssa form!");

  // Verify that LFTR and any other changes have not interfered with SCEV's
  // ability to compute trip count.
#ifndef NDEBUG
  if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    SE->forgetLoop(L);
    const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
    if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
        SE->getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE->getTruncateOrNoop(NewBECount,
                                         BackedgeTakenCount->getType());
    else
      BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount,
                                                 NewBECount->getType());
    assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
  }
#endif

  return Changed;
}