//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued by ScalarEvolution: we only create
// one SCEV of a particular shape, so pointer comparisons for equality are
// legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
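//
// For example, an IR loop phi such as
//
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
//
// is represented as the polynomial recurrence {0,+,1} rather than as a cyclic
// expression graph.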
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
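//
// For example, the add folder folds constants together and merges duplicate
// operands, so getAddExpr canonicalizes (3 + x + 5 + x) to (8 + (2 * x)).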
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

void SCEVCouldNotCompute::Profile(FoldingSetNodeID &ID) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

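// NOTE: The expression-creation methods below share a uniquing pattern: build
// a FoldingSetNodeID describing the node, probe UniqueSCEVs for an existing
// equivalent expression, and only allocate a new node (from SCEVAllocator,
// via placement new) on a miss.  This uniquing is what makes the
// pointer-equality guarantee described in the file header hold.
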
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

void SCEVConstant::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV *op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

void SCEVCastExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      // An operand changed; rebuild the operand list, substituting as we go.
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

void SCEVNAryExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

void SCEVUDivExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

void SCEVAddRecExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      // An operand changed; rebuild the operand list, substituting as we go.
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}

void SCEVUnknown::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
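/// For example, {%y, 7, %x, %x} becomes {7, %x, %x, %y}: the constant first,
/// the duplicate %x values adjacent, and %x ordered relative to %y by
/// heuristics that do not depend on their addresses.
///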
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because
  // we do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1).  However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }
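
  // For example, with K = 4: 4! = 24 = 2^3 * 3, so at this point T = 3 and
  // OddFactorial = 3.  (T starts at 1 because the loop begins at i = 3; the
  // skipped factor i = 2 contributes exactly one factor of two and no odd
  // part.)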

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
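///
/// For example, the quadratic recurrence {0,+,1,+,1} evaluates at iteration
/// It to 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2.
///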
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type.  The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
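        // If truncating and re-extending MaxBECount returns the original
        // value, the truncation was lossless, so CastedMaxBECount can stand
        // in for MaxBECount in the overflow checks below.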
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type.  The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast.  If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast.  If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map.  This is a helper function for getAddExpr.  As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed.  This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value.  Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand.  Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates.  If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded.  eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it.  Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list.  Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          // (For a two-operand multiply, getOperand(MulOp == 0) selects
          // whichever operand is not MulOp.)
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          // Erase both original operands, minding the index shift after the
          // first erase.
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
  new (S) SCEVAddExpr(Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have an mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
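          // Why this is the step: if F(n+1) = F(n) + B and G(n+1) = G(n) + D,
          // then (F*G)(n+1) = (F(n)+B) * (G(n)+D)
          //                 = (F*G)(n) + D*F(n) + B*G(n) + B*D,
          // so the product is again an addrec, with step F*D + G*B + B*D.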
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
  new (S) SCEVMulExpr(Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                            // X udiv 1 --> x
    if (RHSC->isZero())
      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1618 // Determine if the division can be folded into the operands of
1619 // the expression being divided.
1620 // TODO: Generalize this to non-constants by using known-bits information.
1621 const Type *Ty = LHS->getType();
1622 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1623 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1624 // For non-power-of-two values, effectively round the value up to the
1625 // nearest power of two.
1626 if (!RHSC->getValue()->getValue().isPowerOf2())
1627 ++MaxShiftAmt;
1628 const IntegerType *ExtTy =
1629 IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1630 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
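// For example (editor's illustration): {8,+,4}/2 folds to {4,+,2}, since the
// values 8, 12, 16, ... divide to 4, 6, 8, ... The zero-extend comparison
// below guards against recurrences that wrap, where dividing each operand
// would not be equivalent.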
1631 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1632 if (const SCEVConstant *Step =
1633 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1634 if (!Step->getValue()->getValue()
1635 .urem(RHSC->getValue()->getValue()) &&
1636 getZeroExtendExpr(AR, ExtTy) ==
1637 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1638 getZeroExtendExpr(Step, ExtTy),
1639 AR->getLoop())) {
1640 SmallVector<const SCEV *, 4> Operands;
1641 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1642 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1643 return getAddRecExpr(Operands, AR->getLoop());
1644 }
1645 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1646 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1647 SmallVector<const SCEV *, 4> Operands;
1648 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1649 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1650 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1651 // Find an operand that's safely divisible.
1652 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1653 const SCEV *Op = M->getOperand(i);
1654 const SCEV *Div = getUDivExpr(Op, RHSC);
1655 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1656 const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1657 Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1658 MOperands.end());
1659 Operands[i] = Div;
1660 return getMulExpr(Operands);
1661 }
1662 }
1663 }
1664 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1665 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1666 SmallVector<const SCEV *, 4> Operands;
1667 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1668 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1669 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1670 Operands.clear();
1671 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1672 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1673 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1674 break;
1675 Operands.push_back(Op);
1676 }
1677 if (Operands.size() == A->getNumOperands())
1678 return getAddExpr(Operands);
1679 }
1680 }
1682 // Fold if both operands are constant.
1683 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1684 Constant *LHSCV = LHSC->getValue();
1685 Constant *RHSCV = RHSC->getValue();
1686 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1687 RHSCV)));
1688 }
1689 }
1691 FoldingSetNodeID ID;
1692 ID.AddInteger(scUDivExpr);
1693 ID.AddPointer(LHS);
1694 ID.AddPointer(RHS);
1695 void *IP = 0;
1696 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1697 SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1698 new (S) SCEVUDivExpr(LHS, RHS);
1699 UniqueSCEVs.InsertNode(S, IP);
1700 return S;
1701 }
1704 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1705 /// Simplify the expression as much as possible.
1706 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1707 const SCEV *Step, const Loop *L) {
1708 SmallVector<const SCEV *, 4> Operands;
1709 Operands.push_back(Start);
1710 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1711 if (StepChrec->getLoop() == L) {
1712 Operands.insert(Operands.end(), StepChrec->op_begin(),
1713 StepChrec->op_end());
1714 return getAddRecExpr(Operands, L);
1715 }
1717 Operands.push_back(Step);
1718 return getAddRecExpr(Operands, L);
1719 }
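// Usage sketch (editor's note): if Step is itself an addrec {Y,+,Z} for the
// same loop L, the code above flattens getAddRecExpr(X, Step, L) into the
// single recurrence {X,+,Y,+,Z} instead of nesting one addrec in another.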
1721 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1722 /// Simplify the expression as much as possible.
1723 const SCEV *
1724 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1725 const Loop *L) {
1726 if (Operands.size() == 1) return Operands[0];
1727 #ifndef NDEBUG
1728 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1729 assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1730 getEffectiveSCEVType(Operands[0]->getType()) &&
1731 "SCEVAddRecExpr operand types don't match!");
1732 #endif
1734 if (Operands.back()->isZero()) {
1735 Operands.pop_back();
1736 return getAddRecExpr(Operands, L); // {X,+,0} --> X
1737 }
1739 // Canonicalize nested AddRecs by nesting them in order of loop depth.
1740 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1741 const Loop* NestedLoop = NestedAR->getLoop();
1742 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1743 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1744 NestedAR->op_end());
1745 Operands[0] = NestedAR->getStart();
1746 // AddRecs require their operands be loop-invariant with respect to their
1747 // loops. Don't perform this transformation if it would break this
1748 // requirement.
1749 bool AllInvariant = true;
1750 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1751 if (!Operands[i]->isLoopInvariant(L)) {
1752 AllInvariant = false;
1753 break;
1754 }
1755 if (AllInvariant) {
1756 NestedOperands[0] = getAddRecExpr(Operands, L);
1757 AllInvariant = true;
1758 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1759 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1760 AllInvariant = false;
1761 break;
1762 }
1763 if (AllInvariant)
1764 // Ok, both add recurrences are valid after the transformation.
1765 return getAddRecExpr(NestedOperands, NestedLoop);
1766 }
1767 // Reset Operands to its original state.
1768 Operands[0] = NestedAR;
1769 }
1770 }
1772 FoldingSetNodeID ID;
1773 ID.AddInteger(scAddRecExpr);
1774 ID.AddInteger(Operands.size());
1775 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1776 ID.AddPointer(Operands[i]);
1777 ID.AddPointer(L);
1778 void *IP = 0;
1779 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1780 SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1781 new (S) SCEVAddRecExpr(Operands, L);
1782 UniqueSCEVs.InsertNode(S, IP);
1783 return S;
1784 }
1786 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1787 const SCEV *RHS) {
1788 SmallVector<const SCEV *, 2> Ops;
1789 Ops.push_back(LHS);
1790 Ops.push_back(RHS);
1791 return getSMaxExpr(Ops);
1792 }
1794 const SCEV *
1795 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1796 assert(!Ops.empty() && "Cannot get empty smax!");
1797 if (Ops.size() == 1) return Ops[0];
1798 #ifndef NDEBUG
1799 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1800 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1801 getEffectiveSCEVType(Ops[0]->getType()) &&
1802 "SCEVSMaxExpr operand types don't match!");
1803 #endif
1805 // Sort by complexity; this groups all similar expression types together.
1806 GroupByComplexity(Ops, LI);
1808 // If there are any constants, fold them together.
1809 unsigned Idx = 0;
1810 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1811 ++Idx;
1812 assert(Idx < Ops.size());
1813 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1814 // We found two constants, fold them together!
1815 ConstantInt *Fold = ConstantInt::get(
1816 APIntOps::smax(LHSC->getValue()->getValue(),
1817 RHSC->getValue()->getValue()));
1818 Ops[0] = getConstant(Fold);
1819 Ops.erase(Ops.begin()+1); // Erase the folded element
1820 if (Ops.size() == 1) return Ops[0];
1821 LHSC = cast<SCEVConstant>(Ops[0]);
1822 }
1824 // If we are left with a constant minimum-int, strip it off.
1825 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1826 Ops.erase(Ops.begin());
1827 --Idx;
1828 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1829 // If we have an smax with a constant maximum-int, it will always be
1830 // maximum-int.
1831 return Ops[0];
1832 }
1833 }
1835 if (Ops.size() == 1) return Ops[0];
1837 // Find the first SMax
1838 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1839 ++Idx;
1841 // Check to see if one of the operands is an SMax. If so, expand its operands
1842 // onto our operand list, and recurse to simplify.
1843 if (Idx < Ops.size()) {
1844 bool DeletedSMax = false;
1845 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1846 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1847 Ops.erase(Ops.begin()+Idx);
1848 DeletedSMax = true;
1849 }
1851 if (DeletedSMax)
1852 return getSMaxExpr(Ops);
1853 }
1855 // Okay, check to see if the same value occurs in the operand list twice. If
1856 // so, delete one. Since we sorted the list, these values are required to
1857 // be adjacent.
1858 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1859 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
1860 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1861 --i; --e;
1862 }
1864 if (Ops.size() == 1) return Ops[0];
1866 assert(!Ops.empty() && "Reduced smax down to nothing!");
1868 // Okay, it looks like we really DO need an smax expr. Check to see if we
1869 // already have one, otherwise create a new one.
1870 FoldingSetNodeID ID;
1871 ID.AddInteger(scSMaxExpr);
1872 ID.AddInteger(Ops.size());
1873 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1874 ID.AddPointer(Ops[i]);
1875 void *IP = 0;
1876 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1877 SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1878 new (S) SCEVSMaxExpr(Ops);
1879 UniqueSCEVs.InsertNode(S, IP);
1880 return S;
1881 }
1883 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1884 const SCEV *RHS) {
1885 SmallVector<const SCEV *, 2> Ops;
1886 Ops.push_back(LHS);
1887 Ops.push_back(RHS);
1888 return getUMaxExpr(Ops);
1889 }
1891 const SCEV *
1892 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1893 assert(!Ops.empty() && "Cannot get empty umax!");
1894 if (Ops.size() == 1) return Ops[0];
1895 #ifndef NDEBUG
1896 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1897 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1898 getEffectiveSCEVType(Ops[0]->getType()) &&
1899 "SCEVUMaxExpr operand types don't match!");
1900 #endif
1902 // Sort by complexity; this groups all similar expression types together.
1903 GroupByComplexity(Ops, LI);
1905 // If there are any constants, fold them together.
1906 unsigned Idx = 0;
1907 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1908 ++Idx;
1909 assert(Idx < Ops.size());
1910 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1911 // We found two constants, fold them together!
1912 ConstantInt *Fold = ConstantInt::get(
1913 APIntOps::umax(LHSC->getValue()->getValue(),
1914 RHSC->getValue()->getValue()));
1915 Ops[0] = getConstant(Fold);
1916 Ops.erase(Ops.begin()+1); // Erase the folded element
1917 if (Ops.size() == 1) return Ops[0];
1918 LHSC = cast<SCEVConstant>(Ops[0]);
1919 }
1921 // If we are left with a constant minimum-int, strip it off.
1922 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1923 Ops.erase(Ops.begin());
1924 --Idx;
1925 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1926 // If we have an umax with a constant maximum-int, it will always be
1927 // maximum-int.
1928 return Ops[0];
1929 }
1930 }
1932 if (Ops.size() == 1) return Ops[0];
1934 // Find the first UMax
1935 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1936 ++Idx;
1938 // Check to see if one of the operands is a UMax. If so, expand its operands
1939 // onto our operand list, and recurse to simplify.
1940 if (Idx < Ops.size()) {
1941 bool DeletedUMax = false;
1942 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1943 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1944 Ops.erase(Ops.begin()+Idx);
1945 DeletedUMax = true;
1946 }
1948 if (DeletedUMax)
1949 return getUMaxExpr(Ops);
1950 }
1952 // Okay, check to see if the same value occurs in the operand list twice. If
1953 // so, delete one. Since we sorted the list, these values are required to
1954 // be adjacent.
1955 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1956 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
1957 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1958 --i; --e;
1959 }
1961 if (Ops.size() == 1) return Ops[0];
1963 assert(!Ops.empty() && "Reduced umax down to nothing!");
1965 // Okay, it looks like we really DO need a umax expr. Check to see if we
1966 // already have one, otherwise create a new one.
1967 FoldingSetNodeID ID;
1968 ID.AddInteger(scUMaxExpr);
1969 ID.AddInteger(Ops.size());
1970 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1971 ID.AddPointer(Ops[i]);
1972 void *IP = 0;
1973 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1974 SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
1975 new (S) SCEVUMaxExpr(Ops);
1976 UniqueSCEVs.InsertNode(S, IP);
1977 return S;
1978 }
1980 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1981 const SCEV *RHS) {
1982 // ~smax(~x, ~y) == smin(x, y).
1983 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1984 }
1986 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1987 const SCEV *RHS) {
1988 // ~umax(~x, ~y) == umin(x, y)
1989 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1990 }
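// Editor's sketch of the identity on concrete i8 values: for operands 3 and
// 250, ~umax(~3, ~250) == ~umax(252, 5) == ~252 == 3 == umin(3, 250).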
1992 const SCEV *ScalarEvolution::getUnknown(Value *V) {
1993 // Don't attempt to do anything other than create a SCEVUnknown object
1994 // here. createSCEV only calls getUnknown after checking for all other
1995 // interesting possibilities, and any other code that calls getUnknown
1996 // is doing so in order to hide a value from SCEV canonicalization.
1998 FoldingSetNodeID ID;
1999 ID.AddInteger(scUnknown);
2000 ID.AddPointer(V);
2001 void *IP = 0;
2002 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2003 SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2004 new (S) SCEVUnknown(V);
2005 UniqueSCEVs.InsertNode(S, IP);
2006 return S;
2007 }
2009 //===----------------------------------------------------------------------===//
2010 // Basic SCEV Analysis and PHI Idiom Recognition Code
2011 //
2013 /// isSCEVable - Test if values of the given type are analyzable within
2014 /// the SCEV framework. This primarily includes integer types, and it
2015 /// can optionally include pointer types if the ScalarEvolution class
2016 /// has access to target-specific information.
2017 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2018 // Integers are always SCEVable.
2019 if (Ty->isInteger())
2020 return true;
2022 // Pointers are SCEVable if TargetData information is available
2023 // to provide pointer size information.
2024 if (isa<PointerType>(Ty))
2025 return TD != NULL;
2027 // Otherwise it's not SCEVable.
2028 return false;
2029 }
2031 /// getTypeSizeInBits - Return the size in bits of the specified type,
2032 /// for which isSCEVable must return true.
2033 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2034 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2036 // If we have a TargetData, use it!
2037 if (TD)
2038 return TD->getTypeSizeInBits(Ty);
2040 // Otherwise, we support only integer types.
2041 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2042 return Ty->getPrimitiveSizeInBits();
2043 }
2045 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2046 /// the given type and which represents how SCEV will treat the given
2047 /// type, for which isSCEVable must return true. For pointer types,
2048 /// this is the pointer-sized integer type.
2049 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2050 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2052 if (Ty->isInteger())
2053 return Ty;
2055 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2056 return TD->getIntPtrType();
2057 }
2059 const SCEV *ScalarEvolution::getCouldNotCompute() {
2060 return &CouldNotCompute;
2061 }
2063 /// hasSCEV - Return true if the SCEV for this value has already been
2064 /// computed.
2065 bool ScalarEvolution::hasSCEV(Value *V) const {
2066 return Scalars.count(V);
2067 }
2069 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2070 /// expression and create a new one.
2071 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2072 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2074 std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2075 if (I != Scalars.end()) return I->second;
2076 const SCEV *S = createSCEV(V);
2077 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2078 return S;
2079 }
2081 /// getIntegerSCEV - Given a SCEVable type, create a constant for the
2082 /// specified signed integer value and return a SCEV for the constant.
2083 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2084 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2085 return getConstant(ConstantInt::get(ITy, Val));
2086 }
2088 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2090 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2091 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2092 return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2094 const Type *Ty = V->getType();
2095 Ty = getEffectiveSCEVType(Ty);
2096 return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
2097 }
2099 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2100 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2101 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2102 return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2104 const Type *Ty = V->getType();
2105 Ty = getEffectiveSCEVType(Ty);
2106 const SCEV *AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
2107 return getMinusSCEV(AllOnes, V);
2108 }
2110 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2112 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2113 const SCEV *RHS) {
2114 // X - Y --> X + -Y
2115 return getAddExpr(LHS, getNegativeSCEV(RHS));
2116 }
2118 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2119 /// input value to the specified type. If the type must be extended, it is zero
2120 /// extended.
2121 const SCEV *
2122 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2123 const Type *Ty) {
2124 const Type *SrcTy = V->getType();
2125 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2126 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2127 "Cannot truncate or zero extend with non-integer arguments!");
2128 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2129 return V; // No conversion
2130 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2131 return getTruncateExpr(V, Ty);
2132 return getZeroExtendExpr(V, Ty);
2133 }
2135 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2136 /// input value to the specified type. If the type must be extended, it is sign
2137 /// extended.
2138 const SCEV *
2139 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2140 const Type *Ty) {
2141 const Type *SrcTy = V->getType();
2142 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2143 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2144 "Cannot truncate or sign extend with non-integer arguments!");
2145 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2146 return V; // No conversion
2147 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2148 return getTruncateExpr(V, Ty);
2149 return getSignExtendExpr(V, Ty);
2150 }
2152 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2153 /// input value to the specified type. If the type must be extended, it is zero
2154 /// extended. The conversion must not be narrowing.
2155 const SCEV *
2156 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2157 const Type *SrcTy = V->getType();
2158 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2159 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2160 "Cannot noop or zero extend with non-integer arguments!");
2161 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2162 "getNoopOrZeroExtend cannot truncate!");
2163 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2164 return V; // No conversion
2165 return getZeroExtendExpr(V, Ty);
2166 }
2168 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2169 /// input value to the specified type. If the type must be extended, it is sign
2170 /// extended. The conversion must not be narrowing.
2171 const SCEV *
2172 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2173 const Type *SrcTy = V->getType();
2174 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2175 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2176 "Cannot noop or sign extend with non-integer arguments!");
2177 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2178 "getNoopOrSignExtend cannot truncate!");
2179 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2180 return V; // No conversion
2181 return getSignExtendExpr(V, Ty);
2182 }
2184 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2185 /// the input value to the specified type. If the type must be extended,
2186 /// it is extended with unspecified bits. The conversion must not be
2187 /// narrowing.
2188 const SCEV *
2189 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2190 const Type *SrcTy = V->getType();
2191 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2192 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2193 "Cannot noop or any extend with non-integer arguments!");
2194 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2195 "getNoopOrAnyExtend cannot truncate!");
2196 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2197 return V; // No conversion
2198 return getAnyExtendExpr(V, Ty);
2199 }
2201 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2202 /// input value to the specified type. The conversion must not be widening.
2203 const SCEV *
2204 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2205 const Type *SrcTy = V->getType();
2206 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2207 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2208 "Cannot truncate or noop with non-integer arguments!");
2209 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2210 "getTruncateOrNoop cannot extend!");
2211 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2212 return V; // No conversion
2213 return getTruncateExpr(V, Ty);
2214 }
2216 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2217 /// the types using zero-extension, and then perform a umax operation
2218 /// with them.
2219 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2220 const SCEV *RHS) {
2221 const SCEV *PromotedLHS = LHS;
2222 const SCEV *PromotedRHS = RHS;
2224 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2225 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2226 else
2227 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2229 return getUMaxExpr(PromotedLHS, PromotedRHS);
2230 }
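// For instance (editor's illustration): given an i32 LHS and an i64 RHS, the
// i32 operand is zero-extended to i64 and an i64 umax is returned; when the
// widths already match, both operands pass through unchanged.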
2232 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2233 /// the types using zero-extension, and then perform a umin operation
2234 /// with them.
2235 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2236 const SCEV *RHS) {
2237 const SCEV *PromotedLHS = LHS;
2238 const SCEV *PromotedRHS = RHS;
2240 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2241 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2242 else
2243 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2245 return getUMinExpr(PromotedLHS, PromotedRHS);
2246 }
2248 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2249 /// the specified instruction and replaces any references to the symbolic value
2250 /// SymName with the specified value. This is used during PHI resolution.
2251 void
2252 ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2253 const SCEV *SymName,
2254 const SCEV *NewVal) {
2255 std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2256 Scalars.find(SCEVCallbackVH(I, this));
2257 if (SI == Scalars.end()) return;
2259 const SCEV *NV =
2260 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2261 if (NV == SI->second) return; // No change.
2263 SI->second = NV; // Update the scalars map!
2265 // Any instruction values that use this instruction might also need to be
2266 // updated!
2267 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2268 UI != E; ++UI)
2269 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2270 }
2272 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2273 /// a loop header, making it a potential recurrence, or it doesn't.
2275 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2276 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
2277 if (const Loop *L = LI->getLoopFor(PN->getParent()))
2278 if (L->getHeader() == PN->getParent()) {
2279 // If it lives in the loop header, it has two incoming values, one
2280 // from outside the loop, and one from inside.
2281 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2282 unsigned BackEdge = IncomingEdge^1;
2284 // While we are analyzing this PHI node, handle its value symbolically.
2285 const SCEV *SymbolicName = getUnknown(PN);
2286 assert(Scalars.find(PN) == Scalars.end() &&
2287 "PHI node already processed?");
2288 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2290 // Using this symbolic name for the PHI, analyze the value coming around
2291 // the back-edge.
2292 const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2294 // NOTE: If BEValue is loop invariant, we know that the PHI node just
2295 // has a special value for the first iteration of the loop.
2297 // If the value coming around the backedge is an add with the symbolic
2298 // value we just inserted, then we found a simple induction variable!
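// For example (editor's illustration), for IR of the shape
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// BEValue is (1 + %iv) with %iv still symbolic; stripping the symbolic
// operand leaves the accumulator 1, and the PHI becomes {0,+,1}.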
2299 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2300 // If there is a single occurrence of the symbolic value, replace it
2301 // with a recurrence.
2302 unsigned FoundIndex = Add->getNumOperands();
2303 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2304 if (Add->getOperand(i) == SymbolicName)
2305 if (FoundIndex == e) {
2306 FoundIndex = i;
2307 break;
2308 }
2310 if (FoundIndex != Add->getNumOperands()) {
2311 // Create an add with everything but the specified operand.
2312 SmallVector<const SCEV *, 8> Ops;
2313 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2314 if (i != FoundIndex)
2315 Ops.push_back(Add->getOperand(i));
2316 const SCEV *Accum = getAddExpr(Ops);
2318 // This is not a valid addrec if the step amount is varying each
2319 // loop iteration, but is not itself an addrec in this loop.
2320 if (Accum->isLoopInvariant(L) ||
2321 (isa<SCEVAddRecExpr>(Accum) &&
2322 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2323 const SCEV *StartVal =
2324 getSCEV(PN->getIncomingValue(IncomingEdge));
2325 const SCEV *PHISCEV =
2326 getAddRecExpr(StartVal, Accum, L);
2328 // Okay, for the entire analysis of this edge we assumed the PHI
2329 // to be symbolic. We now need to go back and update all of the
2330 // entries for the scalars that use the PHI (except for the PHI
2331 // itself) to use the new analyzed value instead of the "symbolic"
2332 // value.
2333 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2334 return PHISCEV;
2335 }
2336 }
2337 } else if (const SCEVAddRecExpr *AddRec =
2338 dyn_cast<SCEVAddRecExpr>(BEValue)) {
2339 // Otherwise, this could be a loop like this:
2340 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
2341 // In this case, j = {1,+,1} and BEValue is j.
2342 // Because the other in-value of i (0) fits the evolution of BEValue
2343 // i really is an addrec evolution.
2344 if (AddRec->getLoop() == L && AddRec->isAffine()) {
2345 const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2347 // If StartVal = j.start - j.stride, we can use StartVal as the
2348 // initial step of the addrec evolution.
2349 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2350 AddRec->getOperand(1))) {
2351 const SCEV *PHISCEV =
2352 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2354 // Okay, for the entire analysis of this edge we assumed the PHI
2355 // to be symbolic. We now need to go back and update all of the
2356 // entries for the scalars that use the PHI (except for the PHI
2357 // itself) to use the new analyzed value instead of the "symbolic"
2358 // value.
2359 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2360 return PHISCEV;
2361 }
2362 }
2363 }
2365 return SymbolicName;
2366 }
2368 // If it's not a loop phi, we can't handle it yet.
2369 return getUnknown(PN);
2370 }
2372 /// createNodeForGEP - Expand GEP instructions into add and multiply
2373 /// operations. This allows them to be analyzed by regular SCEV code.
2375 const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
2377 const Type *IntPtrTy = TD->getIntPtrType();
2378 Value *Base = GEP->getOperand(0);
2379 // Don't attempt to analyze GEPs over unsized objects.
2380 if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2381 return getUnknown(GEP);
2382 const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2383 gep_type_iterator GTI = gep_type_begin(GEP);
2384 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2385 E = GEP->op_end();
2386 I != E; ++I) {
2387 Value *Index = *I;
2388 // Compute the (potentially symbolic) offset in bytes for this index.
2389 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2390 // For a struct, add the member offset.
2391 const StructLayout &SL = *TD->getStructLayout(STy);
2392 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2393 uint64_t Offset = SL.getElementOffset(FieldNo);
2394 TotalOffset = getAddExpr(TotalOffset,
2395 getIntegerSCEV(Offset, IntPtrTy));
2396 } else {
2397 // For an array, add the element offset, explicitly scaled.
2398 const SCEV *LocalOffset = getSCEV(Index);
2399 if (!isa<PointerType>(LocalOffset->getType()))
2400 // Getelementptr indices are signed.
2401 LocalOffset = getTruncateOrSignExtend(LocalOffset,
2402 IntPtrTy);
2403 LocalOffset =
2404 getMulExpr(LocalOffset,
2405 getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2406 IntPtrTy));
2407 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2408 }
2409 }
2410 return getAddExpr(getSCEV(Base), TotalOffset);
2411 }
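// Worked example (editor's sketch, assuming 64-bit TargetData): for
//   getelementptr [10 x i32]* %p, i64 0, i64 %i
// both indices take the array path; the leading 0 contributes nothing and
// %i is scaled by getTypeAllocSize(i32) == 4, yielding (%p + 4 * %i).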
2413 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2414 /// guaranteed to end in (at every loop iteration). It is, at the same time,
2415 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
2416 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
2417 uint32_t
2418 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2419 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2420 return C->getValue()->getValue().countTrailingZeros();
2422 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2423 return std::min(GetMinTrailingZeros(T->getOperand()),
2424 (uint32_t)getTypeSizeInBits(T->getType()));
2426 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2427 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2428 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2429 getTypeSizeInBits(E->getType()) : OpRes;
2430 }
2432 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2433 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2434 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2435 getTypeSizeInBits(E->getType()) : OpRes;
2436 }
2438 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2439 // The result is the min of all operands results.
2440 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2441 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2442 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2443 return MinOpRes;
2444 }
2446 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2447 // The result is the sum of all operands results.
2448 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2449 uint32_t BitWidth = getTypeSizeInBits(M->getType());
2450 for (unsigned i = 1, e = M->getNumOperands();
2451 SumOpRes != BitWidth && i != e; ++i)
2452 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2453 BitWidth);
2454 return SumOpRes;
2455 }
2457 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2458 // The result is the min of all operands results.
2459 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2460 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2461 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2462 return MinOpRes;
2463 }
2465 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2466 // The result is the min of all operands results.
2467 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2468 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2469 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2470 return MinOpRes;
2471 }
2473 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2474 // The result is the min of all operands results.
2475 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2476 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2477 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2478 return MinOpRes;
2479 }
2481 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2482 // For a SCEVUnknown, ask ValueTracking.
2483 unsigned BitWidth = getTypeSizeInBits(U->getType());
2484 APInt Mask = APInt::getAllOnesValue(BitWidth);
2485 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2486 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2487 return Zeros.countTrailingOnes();
2488 }
2490 // SCEVUDivExpr
2491 return 0;
2492 }
2494 uint32_t
2495 ScalarEvolution::GetMinLeadingZeros(const SCEV *S) {
2496 // TODO: Handle other SCEV expression types here.
2498 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2499 return C->getValue()->getValue().countLeadingZeros();
2501 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
2502 // A zero-extension cast adds zero bits.
2503 return GetMinLeadingZeros(C->getOperand()) +
2504 (getTypeSizeInBits(C->getType()) -
2505 getTypeSizeInBits(C->getOperand()->getType()));
2506 }
2508 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2509 // For a SCEVUnknown, ask ValueTracking.
2510 unsigned BitWidth = getTypeSizeInBits(U->getType());
2511 APInt Mask = APInt::getAllOnesValue(BitWidth);
2512 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2513 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2514 return Zeros.countLeadingOnes();
2515 }
2517 return 0;
2518 }
2520 uint32_t
2521 ScalarEvolution::GetMinSignBits(const SCEV *S) {
2522 // TODO: Handle other SCEV expression types here.
2524 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
2525 const APInt &A = C->getValue()->getValue();
2526 return A.isNegative() ? A.countLeadingOnes() :
2527 A.countLeadingZeros();
2530 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
2531 // A sign-extension cast adds sign bits.
2532 return GetMinSignBits(C->getOperand()) +
2533 (getTypeSizeInBits(C->getType()) -
2534 getTypeSizeInBits(C->getOperand()->getType()));
2535 }
2537 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2538 unsigned BitWidth = getTypeSizeInBits(A->getType());
2540 // Special case decrementing a value (ADD X, -1):
2541 if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0)))
2542 if (CRHS->isAllOnesValue()) {
2543 SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end());
2544 const SCEV *OtherOpsAdd = getAddExpr(OtherOps);
2545 unsigned LZ = GetMinLeadingZeros(OtherOpsAdd);
2547 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2548 // sign bits.
2549 if (LZ == BitWidth - 1)
2550 return BitWidth;
2552 // If we are subtracting one from a positive number, there is no carry
2553 // out of the result.
2554 if (LZ > 0)
2555 return GetMinSignBits(OtherOpsAdd);
2556 }
2558 // Add can have at most one carry bit. Thus we know that the output
2559 // is, at worst, one more bit than the inputs.
2560 unsigned Min = BitWidth;
2561 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2562 unsigned N = GetMinSignBits(A->getOperand(i));
2563 Min = std::min(Min, N) - 1;
2564 if (Min == 0) return 1;
2565 }
2566 return 1 + Min;
2567 }
2569 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2570 // For a SCEVUnknown, ask ValueTracking.
2571 return ComputeNumSignBits(U->getValue(), TD);
2572 }
2574 return 1;
2575 }
2577 /// createSCEV - We know that there is no SCEV for the specified value.
2578 /// Analyze the expression.
2580 const SCEV *ScalarEvolution::createSCEV(Value *V) {
2581 if (!isSCEVable(V->getType()))
2582 return getUnknown(V);
2584 unsigned Opcode = Instruction::UserOp1;
2585 if (Instruction *I = dyn_cast<Instruction>(V))
2586 Opcode = I->getOpcode();
2587 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2588 Opcode = CE->getOpcode();
2589 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2590 return getConstant(CI);
2591 else if (isa<ConstantPointerNull>(V))
2592 return getIntegerSCEV(0, V->getType());
2593 else if (isa<UndefValue>(V))
2594 return getIntegerSCEV(0, V->getType());
2595 else
2596 return getUnknown(V);
2598 User *U = cast<User>(V);
2599 switch (Opcode) {
2600 case Instruction::Add:
2601 return getAddExpr(getSCEV(U->getOperand(0)),
2602 getSCEV(U->getOperand(1)));
2603 case Instruction::Mul:
2604 return getMulExpr(getSCEV(U->getOperand(0)),
2605 getSCEV(U->getOperand(1)));
2606 case Instruction::UDiv:
2607 return getUDivExpr(getSCEV(U->getOperand(0)),
2608 getSCEV(U->getOperand(1)));
2609 case Instruction::Sub:
2610 return getMinusSCEV(getSCEV(U->getOperand(0)),
2611 getSCEV(U->getOperand(1)));
2612 case Instruction::And:
2613 // For an expression like x&255 that merely masks off the high bits,
2614 // use zext(trunc(x)) as the SCEV expression.
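// E.g. (editor's illustration): for an i32 value x, x & 255 becomes
// (zext i8 (trunc i32 x to i8) to i32), a form the truncate/extend folders
// can reason about, whereas SCEV has no bitwise-and expression.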
2615 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2616 if (CI->isNullValue())
2617 return getSCEV(U->getOperand(1));
2618 if (CI->isAllOnesValue())
2619 return getSCEV(U->getOperand(0));
2620 const APInt &A = CI->getValue();
2622 // Instcombine's ShrinkDemandedConstant may strip bits out of
2623 // constants, obscuring what would otherwise be a low-bits mask.
2624 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2625 // knew about to reconstruct a low-bits mask value.
2626 unsigned LZ = A.countLeadingZeros();
2627 unsigned BitWidth = A.getBitWidth();
2628 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2629 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2630 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2632 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2634 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2635 return
2636 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2637 IntegerType::get(BitWidth - LZ)),
2638 U->getType());
2639 }
2640 break;
2642 case Instruction::Or:
2643 // If the RHS of the Or is a constant, we may have something like:
2644 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
2645 // optimizations will transparently handle this case.
2647 // In order for this transformation to be safe, the LHS must be of the
2648 // form X*(2^n) and the Or constant must be less than 2^n.
2649 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2650 const SCEV *LHS = getSCEV(U->getOperand(0));
2651 const APInt &CIVal = CI->getValue();
2652 if (GetMinTrailingZeros(LHS) >=
2653 (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2654 return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2655 }
2656 break;
2657 case Instruction::Xor:
2658 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2659 // If the RHS of the xor is a signbit, then this is just an add.
2660 // Instcombine turns add of signbit into xor as a strength reduction step.
2661 if (CI->getValue().isSignBit())
2662 return getAddExpr(getSCEV(U->getOperand(0)),
2663 getSCEV(U->getOperand(1)));
2665 // If the RHS of xor is -1, then this is a not operation.
2666 if (CI->isAllOnesValue())
2667 return getNotSCEV(getSCEV(U->getOperand(0)));
2669 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2670 // This is a variant of the check for xor with -1, and it handles
2671 // the case where instcombine has trimmed non-demanded bits out
2672 // of an xor with -1.
2673 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2674 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2675 if (BO->getOpcode() == Instruction::And &&
2676 LCI->getValue() == CI->getValue())
2677 if (const SCEVZeroExtendExpr *Z =
2678 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2679 const Type *UTy = U->getType();
2680 const SCEV *Z0 = Z->getOperand();
2681 const Type *Z0Ty = Z0->getType();
2682 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2684 // If C is a low-bits mask, the zero extend is serving to
2685 // mask off the high bits. Complement the operand and
2686 // re-apply the zext.
2687 if (APIntOps::isMask(Z0TySize, CI->getValue()))
2688 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2690 // If C is a single bit, it may be in the sign-bit position
2691 // before the zero-extend. In this case, represent the xor
2692 // using an add, which is equivalent, and re-apply the zext.
2693 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2694 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2695 Trunc.isSignBit())
2696 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2697 UTy);
2698 }
2699 }
2700 break;
2702 case Instruction::Shl:
2703 // Turn shift left of a constant amount into a multiply.
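// E.g. X << 3 is modeled as X * 8 (editor's illustration); the constant
// built below is 1 shifted left by the (limited) shift amount.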
2704 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2705 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2706 Constant *X = ConstantInt::get(
2707 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2708 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2709 }
2710 break;
2712 case Instruction::LShr:
2713 // Turn logical shift right of a constant into an unsigned divide.
2714 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2715 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2716 Constant *X = ConstantInt::get(
2717 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2718 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2719 }
2720 break;
2722 case Instruction::AShr:
2723 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
2724 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2725 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2726 if (L->getOpcode() == Instruction::Shl &&
2727 L->getOperand(1) == U->getOperand(1)) {
2728 unsigned BitWidth = getTypeSizeInBits(U->getType());
2729 uint64_t Amt = BitWidth - CI->getZExtValue();
2730 if (Amt == BitWidth)
2731 return getSCEV(L->getOperand(0)); // shift by zero --> noop
2732 if (Amt > BitWidth)
2733 return getIntegerSCEV(0, U->getType()); // value is undefined
2734 return
2735 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2736 IntegerType::get(Amt)),
2737 U->getType());
2738 }
2739 break;
2741 case Instruction::Trunc:
2742 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2744 case Instruction::ZExt:
2745 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2747 case Instruction::SExt:
2748 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2750 case Instruction::BitCast:
2751 // BitCasts are no-op casts so we just eliminate the cast.
2752 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2753 return getSCEV(U->getOperand(0));
2754 break;
2756 case Instruction::IntToPtr:
2757 if (!TD) break; // Without TD we can't analyze pointers.
2758 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2759 TD->getIntPtrType());
2761 case Instruction::PtrToInt:
2762 if (!TD) break; // Without TD we can't analyze pointers.
2763 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2764 TD->getIntPtrType());
2766 case Instruction::GetElementPtr:
2767 if (!TD) break; // Without TD we can't analyze pointers.
2768 return createNodeForGEP(U);
2770 case Instruction::PHI:
2771 return createNodeForPHI(cast<PHINode>(U));
2773 case Instruction::Select:
2774 // This could be a smax or umax that was lowered earlier.
2775 // Try to recover it.
2776 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2777 Value *LHS = ICI->getOperand(0);
2778 Value *RHS = ICI->getOperand(1);
2779 switch (ICI->getPredicate()) {
2780 case ICmpInst::ICMP_SLT:
2781 case ICmpInst::ICMP_SLE:
2782 std::swap(LHS, RHS);
2783 // fall through
2784 case ICmpInst::ICMP_SGT:
2785 case ICmpInst::ICMP_SGE:
2786 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2787 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2788 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2789 return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2790 break;
2791 case ICmpInst::ICMP_ULT:
2792 case ICmpInst::ICMP_ULE:
2793 std::swap(LHS, RHS);
2794 // fall through
2795 case ICmpInst::ICMP_UGT:
2796 case ICmpInst::ICMP_UGE:
2797 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2798 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2799 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2800 return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2801 break;
2802 case ICmpInst::ICMP_NE:
2803 // n != 0 ? n : 1 -> umax(n, 1)
2804 if (LHS == U->getOperand(1) &&
2805 isa<ConstantInt>(U->getOperand(2)) &&
2806 cast<ConstantInt>(U->getOperand(2))->isOne() &&
2807 isa<ConstantInt>(RHS) &&
2808 cast<ConstantInt>(RHS)->isZero())
2809 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2810 break;
2811 case ICmpInst::ICMP_EQ:
2812 // n == 0 ? 1 : n -> umax(n, 1)
2813 if (LHS == U->getOperand(2) &&
2814 isa<ConstantInt>(U->getOperand(1)) &&
2815 cast<ConstantInt>(U->getOperand(1))->isOne() &&
2816 isa<ConstantInt>(RHS) &&
2817 cast<ConstantInt>(RHS)->isZero())
2818 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2819 break;
2820 default:
2821 break;
2822 }
2823 }
2824 break;
2825 default: // We cannot analyze this expression.
2826 break;
2827 }
2829 return getUnknown(V);
2830 }
2834 //===----------------------------------------------------------------------===//
2835 // Iteration Count Computation Code
2836 //
2838 /// getBackedgeTakenCount - If the specified loop has a predictable
2839 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2840 /// object. The backedge-taken count is the number of times the loop header
2841 /// will be branched to from within the loop. This is one less than the
2842 /// trip count of the loop, since it doesn't count the first iteration,
2843 /// when the header is branched to from outside the loop.
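/// For example (editor's note): a loop whose header executes 5 times has a
/// trip count of 5 but a backedge-taken count of 4, since the first of those
/// executions is reached from outside the loop rather than via the backedge.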
2845 /// Note that it is not valid to call this method on a loop without a
2846 /// loop-invariant backedge-taken count (see
2847 /// hasLoopInvariantBackedgeTakenCount).
2849 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2850 return getBackedgeTakenInfo(L).Exact;
2851 }
2853 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2854 /// return the least SCEV value that is known never to be less than the
2855 /// actual backedge taken count.
2856 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2857 return getBackedgeTakenInfo(L).Max;
2858 }
2860 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
2861 /// onto the given Worklist.
2862 static void
2863 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
2864 BasicBlock *Header = L->getHeader();
2866 // Push all Loop-header PHIs onto the Worklist stack.
2867 for (BasicBlock::iterator I = Header->begin();
2868 PHINode *PN = dyn_cast<PHINode>(I); ++I)
2869 Worklist.push_back(PN);
2870 }
2872 /// PushDefUseChildren - Push users of the given Instruction
2873 /// onto the given Worklist.
2874 static void
2875 PushDefUseChildren(Instruction *I,
2876 SmallVectorImpl<Instruction *> &Worklist) {
2877 // Push the def-use children onto the Worklist stack.
2878 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2879 UI != UE; ++UI)
2880 Worklist.push_back(cast<Instruction>(UI));
2881 }
2883 const ScalarEvolution::BackedgeTakenInfo &
2884 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2885 // Initially insert a CouldNotCompute for this loop. If the insertion
2886 // succeeds, proceed to actually compute a backedge-taken count and
2887 // update the value. The temporary CouldNotCompute value tells SCEV
2888 // code elsewhere that it shouldn't attempt to request a new
2889 // backedge-taken count, which could result in infinite recursion.
2890 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2891 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2892 if (Pair.second) {
2893 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2894 if (ItCount.Exact != getCouldNotCompute()) {
2895 assert(ItCount.Exact->isLoopInvariant(L) &&
2896 ItCount.Max->isLoopInvariant(L) &&
2897 "Computed trip count isn't loop invariant for loop!");
2898 ++NumTripCountsComputed;
2900 // Update the value in the map.
2901 Pair.first->second = ItCount;
2902 } else {
2903 if (ItCount.Max != getCouldNotCompute())
2904 // Update the value in the map.
2905 Pair.first->second = ItCount;
2906 if (isa<PHINode>(L->getHeader()->begin()))
2907 // Only count loops that have phi nodes as not being computable.
2908 ++NumTripCountsNotComputed;
2909 }
2911 // Now that we know more about the trip count for this loop, forget any
2912 // existing SCEV values for PHI nodes in this loop since they are only
2913 // conservative estimates made without the benefit of trip count
2914 // information. This is similar to the code in
2915 // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
2916 // nodes specially.
2917 if (ItCount.hasAnyInfo()) {
2918 SmallVector<Instruction *, 16> Worklist;
2919 PushLoopPHIs(L, Worklist);
2921 SmallPtrSet<Instruction *, 8> Visited;
2922 while (!Worklist.empty()) {
2923 Instruction *I = Worklist.pop_back_val();
2924 if (!Visited.insert(I)) continue;
2926 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2927 Scalars.find(static_cast<Value *>(I));
2928 if (It != Scalars.end()) {
2929 // SCEVUnknown for a PHI either means that it has an unrecognized
2930 // structure, or it's a PHI that's in the process of being computed
2931 // by createNodeForPHI. In the former case, additional loop trip count
2932 // information isn't going to change anything. In the latter case,
2933 // createNodeForPHI will perform the necessary updates on its own when
2934 // it gets to that point.
2935 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
2936 Scalars.erase(It);
2937 ValuesAtScopes.erase(I);
2938 if (PHINode *PN = dyn_cast<PHINode>(I))
2939 ConstantEvolutionLoopExitValue.erase(PN);
2940 }
2942 PushDefUseChildren(I, Worklist);
2943 }
2944 }
2946 return Pair.first->second;
2947 }
2949 /// forgetLoopBackedgeTakenCount - This method should be called by the
2950 /// client when it has changed a loop in a way that may affect
2951 /// ScalarEvolution's ability to compute a trip count, or if the loop
2952 /// is deleted.
2953 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2954 BackedgeTakenCounts.erase(L);
2956 SmallVector<Instruction *, 16> Worklist;
2957 PushLoopPHIs(L, Worklist);
2959 SmallPtrSet<Instruction *, 8> Visited;
2960 while (!Worklist.empty()) {
2961 Instruction *I = Worklist.pop_back_val();
2962 if (!Visited.insert(I)) continue;
2964 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2965 Scalars.find(static_cast<Value *>(I));
2966 if (It != Scalars.end()) {
2967 Scalars.erase(It);
2968 ValuesAtScopes.erase(I);
2969 if (PHINode *PN = dyn_cast<PHINode>(I))
2970 ConstantEvolutionLoopExitValue.erase(PN);
2971 }
2973 PushDefUseChildren(I, Worklist);
2974 }
2975 }
2977 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
2978 /// of the specified loop will execute.
2979 ScalarEvolution::BackedgeTakenInfo
2980 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2981 SmallVector<BasicBlock*, 8> ExitingBlocks;
2982 L->getExitingBlocks(ExitingBlocks);
2984 // Examine all exits and pick the most conservative values.
2985 const SCEV *BECount = getCouldNotCompute();
2986 const SCEV *MaxBECount = getCouldNotCompute();
2987 bool CouldNotComputeBECount = false;
2988 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2989 BackedgeTakenInfo NewBTI =
2990 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2992 if (NewBTI.Exact == getCouldNotCompute()) {
2993 // We couldn't compute an exact value for this exit, so
2994 // we won't be able to compute an exact value for the loop.
2995 CouldNotComputeBECount = true;
2996 BECount = getCouldNotCompute();
2997 } else if (!CouldNotComputeBECount) {
2998 if (BECount == getCouldNotCompute())
2999 BECount = NewBTI.Exact;
3000 else
3001 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3002 }
3003 if (MaxBECount == getCouldNotCompute())
3004 MaxBECount = NewBTI.Max;
3005 else if (NewBTI.Max != getCouldNotCompute())
3006 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3007 }
3009 return BackedgeTakenInfo(BECount, MaxBECount);
3010 }
3012 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3013 /// of the specified loop will execute if it exits via the specified block.
3014 ScalarEvolution::BackedgeTakenInfo
3015 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3016 BasicBlock *ExitingBlock) {
3018 // Okay, we've chosen an exiting block. See what condition causes us to
3019 // exit at this block.
3021 // FIXME: we should be able to handle switch instructions (with a single exit)
3022 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3023 if (ExitBr == 0) return getCouldNotCompute();
3024 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3026 // At this point, we know we have a conditional branch that determines whether
3027 // the loop is exited. However, we don't know if the branch is executed each
3028 // time through the loop. If not, then the execution count of the branch will
3029 // not be equal to the trip count of the loop.
3031 // Currently we check for this by checking to see if the Exit branch goes to
3032 // the loop header. If so, we know it will always execute the same number of
3033 // times as the loop. We also handle the case where the exit block *is* the
3034 // loop header. This is common for un-rotated loops.
3036 // If both of those tests fail, walk up the unique predecessor chain to the
3037 // header, stopping if there is an edge that doesn't exit the loop. If the
3038 // header is reached, the execution count of the branch will be equal to the
3039 // trip count of the loop.
3041 // More extensive analysis could be done to handle more cases here.
3043 if (ExitBr->getSuccessor(0) != L->getHeader() &&
3044 ExitBr->getSuccessor(1) != L->getHeader() &&
3045 ExitBr->getParent() != L->getHeader()) {
3046 // The simple checks failed, try climbing the unique predecessor chain
3047 // up to the header.
3048 bool Ok = false;
3049 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3050 BasicBlock *Pred = BB->getUniquePredecessor();
3051 if (!Pred)
3052 return getCouldNotCompute();
3053 TerminatorInst *PredTerm = Pred->getTerminator();
3054 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3055 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3056 if (PredSucc == BB)
3057 continue;
3058 // If the predecessor has a successor that isn't BB and isn't
3059 // outside the loop, assume the worst.
3060 if (L->contains(PredSucc))
3061 return getCouldNotCompute();
3062 }
3063 if (Pred == L->getHeader()) {
3064 Ok = true;
3065 break;
3066 }
3067 BB = Pred;
3068 }
3069 if (!Ok)
3070 return getCouldNotCompute();
3071 }
3073 // Proceed to the next level to examine the exit condition expression.
3074 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3075 ExitBr->getSuccessor(0),
3076 ExitBr->getSuccessor(1));
3077 }
3079 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3080 /// backedge of the specified loop will execute if its exit condition
3081 /// were a conditional branch of ExitCond, TBB, and FBB.
3082 ScalarEvolution::BackedgeTakenInfo
3083 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3084 Value *ExitCond,
3085 BasicBlock *TBB,
3086 BasicBlock *FBB) {
3087 // Check if the controlling expression for this loop is an And or Or.
3088 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3089 if (BO->getOpcode() == Instruction::And) {
3090 // Recurse on the operands of the and.
3091 BackedgeTakenInfo BTI0 =
3092 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3093 BackedgeTakenInfo BTI1 =
3094 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3095 const SCEV *BECount = getCouldNotCompute();
3096 const SCEV *MaxBECount = getCouldNotCompute();
3097 if (L->contains(TBB)) {
3098 // Both conditions must be true for the loop to continue executing.
3099 // Choose the less conservative count.
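// E.g. (editor's sketch): for "i != n && i != m" controlling the backedge,
// the loop exits as soon as either subcondition fails, so the exact count
// is the umin of the two subcondition counts, computed below.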
3100 if (BTI0.Exact == getCouldNotCompute() ||
3101 BTI1.Exact == getCouldNotCompute())
3102 BECount = getCouldNotCompute();
3103 else
3104 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3105 if (BTI0.Max == getCouldNotCompute())
3106 MaxBECount = BTI1.Max;
3107 else if (BTI1.Max == getCouldNotCompute())
3108 MaxBECount = BTI0.Max;
3109 else
3110 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3111 } else {
3112 // Both conditions must be true for the loop to exit.
3113 assert(L->contains(FBB) && "Loop block has no successor in loop!");
3114 if (BTI0.Exact != getCouldNotCompute() &&
3115 BTI1.Exact != getCouldNotCompute())
3116 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3117 if (BTI0.Max != getCouldNotCompute() &&
3118 BTI1.Max != getCouldNotCompute())
3119 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3120 }
3122 return BackedgeTakenInfo(BECount, MaxBECount);
3123 }
3124 if (BO->getOpcode() == Instruction::Or) {
3125 // Recurse on the operands of the or.
3126 BackedgeTakenInfo BTI0 =
3127 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3128 BackedgeTakenInfo BTI1 =
3129 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3130 const SCEV *BECount = getCouldNotCompute();
3131 const SCEV *MaxBECount = getCouldNotCompute();
3132 if (L->contains(FBB)) {
3133 // Both conditions must be false for the loop to continue executing.
3134 // Choose the less conservative count.
3135 if (BTI0.Exact == getCouldNotCompute() ||
3136 BTI1.Exact == getCouldNotCompute())
3137 BECount = getCouldNotCompute();
3138 else
3139 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3140 if (BTI0.Max == getCouldNotCompute())
3141 MaxBECount = BTI1.Max;
3142 else if (BTI1.Max == getCouldNotCompute())
3143 MaxBECount = BTI0.Max;
3144 else
3145 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3146 } else {
3147 // Both conditions must be false for the loop to exit.
3148 assert(L->contains(TBB) && "Loop block has no successor in loop!");
3149 if (BTI0.Exact != getCouldNotCompute() &&
3150 BTI1.Exact != getCouldNotCompute())
3151 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3152 if (BTI0.Max != getCouldNotCompute() &&
3153 BTI1.Max != getCouldNotCompute())
3154 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3155 }
3157 return BackedgeTakenInfo(BECount, MaxBECount);
3158 }
3161 // With an icmp, it may be feasible to compute an exact backedge-taken count.
3162 // Proceed to the next level to examine the icmp.
3163 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3164 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3166 // If it's not an integer or pointer comparison then compute it the hard way.
3167 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3170 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3171 /// backedge of the specified loop would execute if its exit condition
3172 /// were a conditional branch on the ICmpInst ExitCond, with successors TBB and FBB.
3173 ScalarEvolution::BackedgeTakenInfo
3174 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3175 ICmpInst *ExitCond,
3176 BasicBlock *TBB,
3177 BasicBlock *FBB) {
3179 // If the condition was exit on true, convert the condition to exit on false
3180 ICmpInst::Predicate Cond;
3181 if (!L->contains(FBB))
3182 Cond = ExitCond->getPredicate();
3183 else
3184 Cond = ExitCond->getInversePredicate();
3186 // Handle common loops like: for (X = "string"; *X; ++X)
3187 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3188 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3189 const SCEV *ItCnt =
3190 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3191 if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3192 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3193 return BackedgeTakenInfo(ItCnt,
3194 isa<SCEVConstant>(ItCnt) ? ItCnt :
3195 getConstant(APInt::getMaxValue(BitWidth)-1));
3199 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3200 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3202 // Try to evaluate any dependencies out of the loop.
3203 LHS = getSCEVAtScope(LHS, L);
3204 RHS = getSCEVAtScope(RHS, L);
3206 // At this point, we would like to compute how many iterations of the
3207 // loop the predicate will return true for these inputs.
3208 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3209 // If there is a loop-invariant, force it into the RHS.
3210 std::swap(LHS, RHS);
3211 Cond = ICmpInst::getSwappedPredicate(Cond);
3214 // If we have a comparison of a chrec against a constant, try to use value
3215 // ranges to answer this query.
3216 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3217 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3218 if (AddRec->getLoop() == L) {
3219 // Form the constant range.
3220 ConstantRange CompRange(
3221 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3223 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3224 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3227 switch (Cond) {
3228 case ICmpInst::ICMP_NE: { // while (X != Y)
3229 // Convert to: while (X-Y != 0)
3230 const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3231 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3234 case ICmpInst::ICMP_EQ: {
3235 // Convert to: while (X-Y == 0) // while (X == Y)
3236 const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3237 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3240 case ICmpInst::ICMP_SLT: {
3241 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3242 if (BTI.hasAnyInfo()) return BTI;
3245 case ICmpInst::ICMP_SGT: {
3246 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3247 getNotSCEV(RHS), L, true);
3248 if (BTI.hasAnyInfo()) return BTI;
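// The SGT case above (and the UGT case below) reuses HowManyLessThans via
// the identity not(x) = -x-1, which reverses order: x >s y exactly when
// (-x-1) <s (-y-1). E.g. in i8, 3 >s 1 and not(3) = -4 <s not(1) = -2.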
3251 case ICmpInst::ICMP_ULT: {
3252 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3253 if (BTI.hasAnyInfo()) return BTI;
3256 case ICmpInst::ICMP_UGT: {
3257 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3258 getNotSCEV(RHS), L, false);
3259 if (BTI.hasAnyInfo()) return BTI;
3260 break;
3261 }
3262 default:
3263 #if 0
3264 errs() << "ComputeBackedgeTakenCount ";
3265 if (ExitCond->getOperand(0)->getType()->isUnsigned())
3266 errs() << "[unsigned] ";
3267 errs() << *LHS << " "
3268 << Instruction::getOpcodeName(Instruction::ICmp)
3269 << " " << *RHS << "\n";
3270 #endif
3271 break;
3272 }
3273 return
3274 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3277 static ConstantInt *
3278 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3279 ScalarEvolution &SE) {
3280 const SCEV *InVal = SE.getConstant(C);
3281 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3282 assert(isa<SCEVConstant>(Val) &&
3283 "Evaluation of SCEV at constant didn't fold correctly?");
3284 return cast<SCEVConstant>(Val)->getValue();
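// For intuition: on an affine chrec {S,+,T}, evaluateAtIteration(k) folds
// to S + k*T, so evaluating {5,+,3} at the constant iteration 4 yields
// 5 + 3*4 = 17 (values chosen for illustration).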
3287 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3288 /// and a GEP expression (missing the pointer index) indexing into it, return
3289 /// the addressed element of the initializer or null if the index expression is
3290 /// invalid.
3291 static Constant *
3292 GetAddressedElementFromGlobal(GlobalVariable *GV,
3293 const std::vector<ConstantInt*> &Indices) {
3294 Constant *Init = GV->getInitializer();
3295 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3296 uint64_t Idx = Indices[i]->getZExtValue();
3297 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3298 assert(Idx < CS->getNumOperands() && "Bad struct index!");
3299 Init = cast<Constant>(CS->getOperand(Idx));
3300 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3301 if (Idx >= CA->getNumOperands()) return 0; // Bogus program
3302 Init = cast<Constant>(CA->getOperand(Idx));
3303 } else if (isa<ConstantAggregateZero>(Init)) {
3304 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3305 assert(Idx < STy->getNumElements() && "Bad struct index!");
3306 Init = Constant::getNullValue(STy->getElementType(Idx));
3307 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3308 if (Idx >= ATy->getNumElements()) return 0; // Bogus program
3309 Init = Constant::getNullValue(ATy->getElementType());
3311 assert(0 && "Unknown constant aggregate type!");
3315 return 0; // Unknown initializer type
3321 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3322 /// 'icmp op load X, cst', try to see if we can compute the backedge
3323 /// execution count.
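///
/// An illustrative sketch:
///
///   static const int table[] = { 1, 2, 3, 0 };
///   for (i = 0; table[i] != 0; ++i) ...
///
/// Here the code below evaluates table[{0,+,1}] iteration by iteration; the
/// compare first becomes false at i == 3, so the backedge-taken count is 3.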
3324 const SCEV *
3325 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3326 LoadInst *LI,
3327 Constant *RHS,
3328 const Loop *L,
3329 ICmpInst::Predicate predicate) {
3330 if (LI->isVolatile()) return getCouldNotCompute();
3332 // Check to see if the loaded pointer is a getelementptr of a global.
3333 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3334 if (!GEP) return getCouldNotCompute();
3336 // Make sure that it is really a constant global we are gepping, with an
3337 // initializer, and make sure the first IDX is really 0.
3338 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3339 if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3340 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3341 !cast<Constant>(GEP->getOperand(1))->isNullValue())
3342 return getCouldNotCompute();
3344 // Okay, we allow one non-constant index into the GEP instruction.
3345 Value *VarIdx = 0;
3346 std::vector<ConstantInt*> Indexes;
3347 unsigned VarIdxNum = 0;
3348 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3349 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3350 Indexes.push_back(CI);
3351 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3352 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
3353 VarIdx = GEP->getOperand(i);
3354 VarIdxNum = i-2;
3355 Indexes.push_back(0);
3356 }
3358 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3359 // Check to see if X is a loop variant variable value now.
3360 const SCEV *Idx = getSCEV(VarIdx);
3361 Idx = getSCEVAtScope(Idx, L);
3363 // We can only recognize very limited forms of loop index expressions, in
3364 // particular, only affine AddRec's like {C1,+,C2}.
3365 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3366 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3367 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3368 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3369 return getCouldNotCompute();
3371 unsigned MaxSteps = MaxBruteForceIterations;
3372 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3373 ConstantInt *ItCst =
3374 ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3375 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3377 // Form the GEP offset.
3378 Indexes[VarIdxNum] = Val;
3380 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3381 if (Result == 0) break; // Cannot compute!
3383 // Evaluate the condition for this iteration.
3384 Result = ConstantExpr::getICmp(predicate, Result, RHS);
3385 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
3386 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3387 #if 0
3388 errs() << "\n***\n*** Computed loop count " << *ItCst
3389 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3390 << "***\n";
3391 #endif
3392 ++NumArrayLenItCounts;
3393 return getConstant(ItCst); // Found terminating iteration!
3396 return getCouldNotCompute();
3400 /// CanConstantFold - Return true if we can constant fold an instruction of the
3401 /// specified type, assuming that all operands were constants.
3402 static bool CanConstantFold(const Instruction *I) {
3403 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3404 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3405 return true;
3407 if (const CallInst *CI = dyn_cast<CallInst>(I))
3408 if (const Function *F = CI->getCalledFunction())
3409 return canConstantFoldCallTo(F);
3410 return false;
3411 }
3413 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3414 /// in the loop that V is derived from. We allow arbitrary operations along the
3415 /// way, but the operands of an operation must either be constants or a value
3416 /// derived from a constant PHI. If this expression does not fit with these
3417 /// constraints, return null.
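///
/// For example (illustrative IR), with %i in the header of loop L:
///
///   %i      = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///   %t      = mul i32 %i, 3
///   %i.next = add i32 %t, 1
///
/// getConstantEvolvingPHI(%i.next, L) returns %i: every operand along the
/// way is either a constant or derived from that single PHI.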
3418 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3419 // If this is not an instruction, or if this is an instruction outside of the
3420 // loop, it can't be derived from a loop PHI.
3421 Instruction *I = dyn_cast<Instruction>(V);
3422 if (I == 0 || !L->contains(I->getParent())) return 0;
3424 if (PHINode *PN = dyn_cast<PHINode>(I)) {
3425 if (L->getHeader() == I->getParent())
3426 return PN;
3428 // We don't currently keep track of the control flow needed to evaluate
3429 // PHIs, so we cannot handle PHIs inside of loops.
3430 return 0;
3431 }
3433 // If we won't be able to constant fold this expression even if the operands
3434 // are constants, return early.
3435 if (!CanConstantFold(I)) return 0;
3437 // Otherwise, we can evaluate this instruction if all of its operands are
3438 // constant or derived from a PHI node themselves.
3439 PHINode *PHI = 0;
3440 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3441 if (!(isa<Constant>(I->getOperand(Op)) ||
3442 isa<GlobalValue>(I->getOperand(Op)))) {
3443 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3444 if (P == 0) return 0; // Not evolving from PHI
3445 if (PHI == 0)
3446 PHI = P;
3447 else if (PHI != P)
3448 return 0; // Evolving from multiple different PHIs.
3449 }
3451 // This is an expression evolving from a constant PHI!
3452 return PHI;
3453 }
3455 /// EvaluateExpression - Given an expression that passes the
3456 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3457 /// in the loop has the value PHIVal. If we can't fold this expression for some
3458 /// reason, return null.
3459 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3460 if (isa<PHINode>(V)) return PHIVal;
3461 if (Constant *C = dyn_cast<Constant>(V)) return C;
3462 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3463 Instruction *I = cast<Instruction>(V);
3464 LLVMContext *Context = I->getParent()->getContext();
3466 std::vector<Constant*> Operands;
3467 Operands.resize(I->getNumOperands());
3469 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3470 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3471 if (Operands[i] == 0) return 0;
3474 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3475 return ConstantFoldCompareInstOperands(CI->getPredicate(),
3476 &Operands[0], Operands.size(),
3477 Context);
3479 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3480 &Operands[0], Operands.size(),
3481 Context);
3482 }
3484 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3485 /// in the header of its containing loop, we know the loop executes a
3486 /// constant number of times, and the PHI node is just a recurrence
3487 /// involving constants, fold it.
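///
/// For example, a header PHI that starts at 1 and doubles around the
/// backedge (x = phi [1, x*2]) with a backedge-taken count of 5 folds, by
/// running those five iterations below, to the constant 32.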
3488 Constant *
3489 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3490 const APInt &BEs,
3491 const Loop *L) {
3492 std::map<PHINode*, Constant*>::iterator I =
3493 ConstantEvolutionLoopExitValue.find(PN);
3494 if (I != ConstantEvolutionLoopExitValue.end())
3495 return I->second;
3497 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3498 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
3500 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3502 // Since the loop is canonicalized, the PHI node must have two entries. One
3503 // entry must be a constant (coming in from outside of the loop), and the
3504 // second must be derived from the same PHI.
3505 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3506 Constant *StartCST =
3507 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3508 if (StartCST == 0)
3509 return RetVal = 0; // Must be a constant.
3511 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3512 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3513 if (PN2 != PN)
3514 return RetVal = 0; // Not derived from same PHI.
3516 // Execute the loop symbolically to determine the exit value.
3517 if (BEs.getActiveBits() >= 32)
3518 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3520 unsigned NumIterations = BEs.getZExtValue(); // must be in range
3521 unsigned IterationNum = 0;
3522 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3523 if (IterationNum == NumIterations)
3524 return RetVal = PHIVal; // Got exit value!
3526 // Compute the value of the PHI node for the next iteration.
3527 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3528 if (NextPHI == PHIVal)
3529 return RetVal = NextPHI; // Stopped evolving!
3530 if (NextPHI == 0)
3531 return 0; // Couldn't evaluate!
3532 PHIVal = NextPHI;
3533 }
3534 }
3536 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3537 /// constant number of times (the condition evolves only from constants),
3538 /// try to evaluate a few iterations of the loop until the exit
3539 /// condition gets a value of ExitWhen (true or false). If we cannot
3540 /// evaluate the trip count of the loop, return getCouldNotCompute().
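///
/// An illustrative sketch: if x starts at 3 and doubles each iteration and
/// the exit test is x > 100 with ExitWhen = true, the evaluation below sees
/// 3, 6, 12, 24, 48, 96, 192 and first gets a true compare at iteration 6,
/// so 6 is returned as the backedge-taken count.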
3541 const SCEV *
3542 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3543 Value *Cond,
3544 bool ExitWhen) {
3545 PHINode *PN = getConstantEvolvingPHI(Cond, L);
3546 if (PN == 0) return getCouldNotCompute();
3548 // Since the loop is canonicalized, the PHI node must have two entries. One
3549 // entry must be a constant (coming in from outside of the loop), and the
3550 // second must be derived from the same PHI.
3551 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3552 Constant *StartCST =
3553 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3554 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
3556 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3557 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3558 if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
3560 // Okay, we found a PHI node that defines the trip count of this loop. Execute
3561 // the loop symbolically to determine when the condition gets a value of
3562 // ExitWhen.
3563 unsigned IterationNum = 0;
3564 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
3565 for (Constant *PHIVal = StartCST;
3566 IterationNum != MaxIterations; ++IterationNum) {
3567 ConstantInt *CondVal =
3568 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3570 // Couldn't symbolically evaluate.
3571 if (!CondVal) return getCouldNotCompute();
3573 if (CondVal->getValue() == uint64_t(ExitWhen)) {
3574 ++NumBruteForceTripCountsComputed;
3575 return getConstant(Type::Int32Ty, IterationNum);
3578 // Compute the value of the PHI node for the next iteration.
3579 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3580 if (NextPHI == 0 || NextPHI == PHIVal)
3581 return getCouldNotCompute();// Couldn't evaluate or not making progress...
3585 // Too many iterations were needed to evaluate.
3586 return getCouldNotCompute();
3589 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3590 /// at the specified scope in the program. The L value specifies a loop
3591 /// nest to evaluate the expression at, where null means the top-level
3592 /// scope and a non-null loop means the scope immediately inside that loop.
3594 /// This method can be used to compute the exit value for a variable defined
3595 /// in a loop by querying what the value will hold in the parent loop.
3597 /// In the case that a relevant loop exit value cannot be computed, the
3598 /// original value V is returned.
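///
/// For example (sketch): for (i = 0; i != n; ++i) gives i the SCEV
/// {0,+,1}<L>. Querying that expression at the scope of L's parent loop
/// asks for the value i holds once L has finished, which folds via the
/// backedge-taken count to n.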
3599 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3600 // FIXME: this should be turned into a virtual method on SCEV!
3602 if (isa<SCEVConstant>(V)) return V;
3604 // If this instruction is evolved from a constant-evolving PHI, compute the
3605 // exit value from the loop without using SCEVs.
3606 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3607 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3608 const Loop *LI = (*this->LI)[I->getParent()];
3609 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
3610 if (PHINode *PN = dyn_cast<PHINode>(I))
3611 if (PN->getParent() == LI->getHeader()) {
3612 // Okay, there is no closed form solution for the PHI node. Check
3613 // to see if the loop that contains it has a known backedge-taken
3614 // count. If so, we may be able to force computation of the exit
3615 // value.
3616 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3617 if (const SCEVConstant *BTCC =
3618 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3619 // Okay, we know how many times the containing loop executes. If
3620 // this is a constant evolving PHI node, get the final value at
3621 // the specified iteration number.
3622 Constant *RV = getConstantEvolutionLoopExitValue(PN,
3623 BTCC->getValue()->getValue(),
3625 if (RV) return getSCEV(RV);
3629 // Okay, this is an expression that we cannot symbolically evaluate
3630 // into a SCEV. Check to see if it's possible to symbolically evaluate
3631 // the arguments into constants, and if so, try to constant propagate the
3632 // result. This is particularly useful for computing loop exit values.
3633 if (CanConstantFold(I)) {
3634 // Check to see if we've folded this instruction at this loop before.
3635 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3636 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3637 Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3638 if (!Pair.second)
3639 return Pair.first->second ? &*getSCEV(Pair.first->second) : V;
3641 std::vector<Constant*> Operands;
3642 Operands.reserve(I->getNumOperands());
3643 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3644 Value *Op = I->getOperand(i);
3645 if (Constant *C = dyn_cast<Constant>(Op)) {
3646 Operands.push_back(C);
3648 // If any of the operands is non-constant and if they are
3649 // non-integer and non-pointer, don't even try to analyze them
3650 // with scev techniques.
3651 if (!isSCEVable(Op->getType()))
3652 return V;
3654 const SCEV *OpV = getSCEVAtScope(getSCEV(Op), L);
3655 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3656 Constant *C = SC->getValue();
3657 if (C->getType() != Op->getType())
3658 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3659 Op->getType(),
3660 false),
3661 C, Op->getType());
3662 Operands.push_back(C);
3663 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3664 if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3665 if (C->getType() != Op->getType())
3666 C =
3667 ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3668 Op->getType(),
3669 false),
3670 C, Op->getType());
3671 Operands.push_back(C);
3680 Constant *C;
3681 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3682 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3683 &Operands[0], Operands.size(),
3684 Context);
3685 else
3686 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3687 &Operands[0], Operands.size(), Context);
3688 Pair.first->second = C;
3689 return getSCEV(C);
3690 }
3691 }
3693 // This is some other type of SCEVUnknown, just return it.
3694 return V;
3695 }
3697 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3698 // Avoid performing the look-up in the common case where the specified
3699 // expression has no loop-variant portions.
3700 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3701 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3702 if (OpAtScope != Comm->getOperand(i)) {
3703 // Okay, at least one of these operands is loop variant but might be
3704 // foldable. Build a new instance of the folded commutative expression.
3705 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3706 Comm->op_begin()+i);
3707 NewOps.push_back(OpAtScope);
3709 for (++i; i != e; ++i) {
3710 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3711 NewOps.push_back(OpAtScope);
3713 if (isa<SCEVAddExpr>(Comm))
3714 return getAddExpr(NewOps);
3715 if (isa<SCEVMulExpr>(Comm))
3716 return getMulExpr(NewOps);
3717 if (isa<SCEVSMaxExpr>(Comm))
3718 return getSMaxExpr(NewOps);
3719 if (isa<SCEVUMaxExpr>(Comm))
3720 return getUMaxExpr(NewOps);
3721 assert(0 && "Unknown commutative SCEV type!");
3724 // If we got here, all operands are loop invariant.
3725 return Comm;
3726 }
3728 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3729 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3730 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3731 if (LHS == Div->getLHS() && RHS == Div->getRHS())
3732 return Div; // must be loop invariant
3733 return getUDivExpr(LHS, RHS);
3736 // If this is a loop recurrence for a loop that does not contain L, then we
3737 // are dealing with the final value computed by the loop.
3738 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3739 if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3740 // To evaluate this recurrence, we need to know how many times the AddRec
3741 // loop iterates. Compute this now.
3742 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3743 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3745 // Then, evaluate the AddRec.
3746 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3747 }
3748 return AddRec;
3749 }
3751 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3752 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3753 if (Op == Cast->getOperand())
3754 return Cast; // must be loop invariant
3755 return getZeroExtendExpr(Op, Cast->getType());
3758 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3759 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3760 if (Op == Cast->getOperand())
3761 return Cast; // must be loop invariant
3762 return getSignExtendExpr(Op, Cast->getType());
3765 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3766 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3767 if (Op == Cast->getOperand())
3768 return Cast; // must be loop invariant
3769 return getTruncateExpr(Op, Cast->getType());
3772 assert(0 && "Unknown SCEV type!");
3773 return 0;
3774 }
3776 /// getSCEVAtScope - This is a convenience function which does
3777 /// getSCEVAtScope(getSCEV(V), L).
3778 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3779 return getSCEVAtScope(getSCEV(V), L);
3782 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3783 /// following equation:
3785 /// A * X = B (mod N)
3787 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3788 /// A and B isn't important.
3790 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
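///
/// A small worked instance (values chosen for illustration): with BW = 3,
/// N = 8, solve 6*X = 4 (mod 8). Here D = gcd(6, 8) = 2 = 2^1 and B = 4 is
/// divisible by D, so a solution exists. Then A/D = 3, N/D = 4, and the
/// inverse I = 3^-1 (mod 4) = 3, giving X = I*(B/D) (mod N/D) = 3*2 (mod 4)
/// = 2. Indeed 6*2 = 12 = 4 (mod 8), and 2 is the smaller root of {2, 6}.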
3791 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3792 ScalarEvolution &SE) {
3793 uint32_t BW = A.getBitWidth();
3794 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3795 assert(A != 0 && "A must be non-zero.");
3797 // 1. D = gcd(A, N)
3799 // The gcd of A and N may have only one prime factor: 2. The number of
3800 // trailing zeros in A is its multiplicity.
3801 uint32_t Mult2 = A.countTrailingZeros();
3802 // D = 2^Mult2
3804 // 2. Check if B is divisible by D.
3806 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3807 // is not less than multiplicity of this prime factor for D.
3808 if (B.countTrailingZeros() < Mult2)
3809 return SE.getCouldNotCompute();
3811 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3812 // modulo (N / D).
3814 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
3815 // bit width during computations.
3816 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
3817 APInt Mod(BW + 1, 0);
3818 Mod.set(BW - Mult2); // Mod = N / D
3819 APInt I = AD.multiplicativeInverse(Mod);
3821 // 4. Compute the minimum unsigned root of the equation:
3822 // I * (B / D) mod (N / D)
3823 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3825 // The result is guaranteed to be less than 2^BW, so we may truncate it to BW
3826 // bits.
3827 return SE.getConstant(Result.trunc(BW));
3830 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3831 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
3832 /// might be the same) or two SCEVCouldNotCompute objects.
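///
/// Recall that a chrec {L,+,M,+,N} evaluated at iteration x is
/// L + M*x + N*x*(x-1)/2, which as a polynomial in x is
/// (N/2)*x^2 + (M - N/2)*x + L; that is where the A, B and C coefficients
/// below come from. E.g. {0,+,1,+,2} is x + x*(x-1) = x^2.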
3834 static std::pair<const SCEV *,const SCEV *>
3835 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3836 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
3837 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
3838 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
3839 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
3841 // We currently can only solve this if the coefficients are constants.
3842 if (!LC || !MC || !NC) {
3843 const SCEV *CNC = SE.getCouldNotCompute();
3844 return std::make_pair(CNC, CNC);
3847 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
3848 const APInt &L = LC->getValue()->getValue();
3849 const APInt &M = MC->getValue()->getValue();
3850 const APInt &N = NC->getValue()->getValue();
3851 APInt Two(BitWidth, 2);
3852 APInt Four(BitWidth, 4);
3855 using namespace APIntOps;
3856 const APInt &C = L;
3857 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
3858 // The B coefficient is M-N/2
3859 APInt B(M);
3860 B -= N.sdiv(Two);
3862 // The A coefficient is N/2
3863 APInt A(N.sdiv(Two));
3865 // Compute the B^2-4ac term.
3866 APInt SqrtTerm(B);
3867 SqrtTerm *= B;
3868 SqrtTerm -= Four * (A * C);
3870 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
3871 // integer value or else APInt::sqrt() will assert.
3872 APInt SqrtVal(SqrtTerm.sqrt());
3874 // Compute the two solutions for the quadratic formula.
3875 // The divisions must be performed as signed divisions.
3876 APInt NegB(-B);
3877 APInt TwoA( A << 1 );
3878 if (TwoA.isMinValue()) {
3879 const SCEV *CNC = SE.getCouldNotCompute();
3880 return std::make_pair(CNC, CNC);
3883 LLVMContext *Context = SE.getContext();
3885 ConstantInt *Solution1 =
3886 Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
3887 ConstantInt *Solution2 =
3888 Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
3890 return std::make_pair(SE.getConstant(Solution1),
3891 SE.getConstant(Solution2));
3892 } // end of the scope using the APIntOps namespace
3893 }
3895 /// HowFarToZero - Return the number of times a backedge comparing the specified
3896 /// value to zero will execute. If not computable, return CouldNotCompute.
3897 const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
3898 // If the value is a constant
3899 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3900 // If the value is already zero, the branch will execute zero times.
3901 if (C->getValue()->isZero()) return C;
3902 return getCouldNotCompute(); // Otherwise it will loop infinitely.
3905 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
3906 if (!AddRec || AddRec->getLoop() != L)
3907 return getCouldNotCompute();
3909 if (AddRec->isAffine()) {
3910 // If this is an affine expression, the execution count of this branch is
3911 // the minimum unsigned root of the following equation:
3913 // Start + Step*N = 0 (mod 2^BW)
3917 // Step*N = -Start (mod 2^BW)
3919 // where BW is the common bit width of Start and Step.
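//
// The modular view matters because of wraparound: e.g. for {2,+,4} in i8,
// 4*N = -2 (mod 256) has no solution (gcd(4, 256) = 4 does not divide
// 254), so the sequence 2, 6, 10, ... wraps forever without hitting zero
// and the solver correctly reports CouldNotCompute.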
3921 // Get the initial value for the loop.
3922 const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
3923 L->getParentLoop());
3924 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
3925 L->getParentLoop());
3927 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
3928 // For now we handle only constant steps.
3930 // First, handle unitary steps.
3931 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
3932 return getNegativeSCEV(Start); // N = -Start (as unsigned)
3933 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
3934 return Start; // N = Start (as unsigned)
3936 // Then, try to solve the above equation provided that Start is constant.
3937 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
3938 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
3939 -StartC->getValue()->getValue(),
3940 *this);
3941 }
3942 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
3943 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
3944 // the quadratic equation to solve it.
3945 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
3946 *this);
3947 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
3948 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
3949 if (R1) {
3950 #if 0
3951 errs() << "HFTZ: " << *V << " - sol#1: " << *R1
3952 << " sol#2: " << *R2 << "\n";
3953 #endif
3954 // Pick the smallest positive root value.
3955 if (ConstantInt *CB =
3956 dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
3957 R1->getValue(), R2->getValue()))) {
3958 if (CB->getZExtValue() == false)
3959 std::swap(R1, R2); // R1 is the minimum root now.
3961 // We can only use this value if the chrec ends up with an exact zero
3962 // value at this index. When solving for "X*X != 5", for example, we
3963 // should not accept a root of 2.
3964 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
3965 if (Val->isZero())
3966 return R1; // We found a quadratic root!
3971 return getCouldNotCompute();
3974 /// HowFarToNonZero - Return the number of times a backedge checking the
3975 /// specified value for nonzero will execute. If not computable, return
3976 /// CouldNotCompute.
3977 const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
3978 // Loops that look like: while (X == 0) are very strange indeed. We don't
3979 // handle them yet except for the trivial case. This could be expanded in the
3980 // future as needed.
3982 // If the value is a constant, check to see if it is known to be non-zero
3983 // already. If so, the backedge will execute zero times.
3984 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3985 if (!C->getValue()->isNullValue())
3986 return getIntegerSCEV(0, C->getType());
3987 return getCouldNotCompute(); // Otherwise it will loop infinitely.
3990 // We could implement others, but I really doubt anyone writes loops like
3991 // this, and if they did, they would already be constant folded.
3992 return getCouldNotCompute();
3995 /// getLoopPredecessor - If the given loop's header has exactly one
3996 /// predecessor outside the loop, return it. Otherwise return null.
3998 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
3999 BasicBlock *Header = L->getHeader();
4000 BasicBlock *Pred = 0;
4001 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4002 PI != E; ++PI)
4003 if (!L->contains(*PI)) {
4004 if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4005 Pred = *PI;
4006 }
4008 return Pred;
4009 }
4010 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4011 /// (which may not be an immediate predecessor) which has exactly one
4012 /// successor from which BB is reachable, or null if no such block is
4013 /// found.
4015 BasicBlock *
4016 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4017 // If the block has a unique predecessor, then there is no path from the
4018 // predecessor to the block that does not go through the direct edge
4019 // from the predecessor to the block.
4020 if (BasicBlock *Pred = BB->getSinglePredecessor())
4021 return Pred;
4023 // A loop's header is defined to be a block that dominates the loop.
4024 // If the header has a unique predecessor outside the loop, it must be
4025 // a block that has exactly one successor that can reach the loop.
4026 if (Loop *L = LI->getLoopFor(BB))
4027 return getLoopPredecessor(L);
4029 return 0;
4030 }
4032 /// HasSameValue - SCEV structural equivalence is usually sufficient for
4033 /// testing whether two expressions are equal, however for the purposes of
4034 /// looking for a condition guarding a loop, it can be useful to be a little
4035 /// more general, since a front-end may have replicated the controlling
4036 /// expression.
4038 static bool HasSameValue(const SCEV *A, const SCEV *B) {
4039 // Quick check to see if they are the same SCEV.
4040 if (A == B) return true;
4042 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4043 // two different instructions with the same value. Check for this case.
4044 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4045 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4046 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4047 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4048 if (AI->isIdenticalTo(BI))
4049 return true;
4051 // Otherwise, assume they may have a different value.
4052 return false;
4053 }
4055 /// isLoopGuardedByCond - Test whether entry to the loop is protected by
4056 /// a conditional between LHS and RHS. This is used to help avoid max
4057 /// expressions in loop trip counts.
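///
/// For example (sketch):
///
///   if (n > 0)
///     for (i = 0; i < n; ++i) ...
///
/// The guarding branch proves the compare holds on entry, which lets
/// HowManyLessThans use n itself as the limit rather than the more
/// conservative smax(n, 0).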
4058 bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4059 ICmpInst::Predicate Pred,
4060 const SCEV *LHS, const SCEV *RHS) {
4061 // Interpret a null as meaning no loop, where there is obviously no guard
4062 // (interprocedural conditions notwithstanding).
4063 if (!L) return false;
4065 BasicBlock *Predecessor = getLoopPredecessor(L);
4066 BasicBlock *PredecessorDest = L->getHeader();
4068 // Starting at the loop predecessor, climb up the predecessor chain, as long
4069 // as there are predecessors that can be found that have unique successors
4070 // leading to the original header.
4071 for (; Predecessor;
4072 PredecessorDest = Predecessor,
4073 Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4075 BranchInst *LoopEntryPredicate =
4076 dyn_cast<BranchInst>(Predecessor->getTerminator());
4077 if (!LoopEntryPredicate ||
4078 LoopEntryPredicate->isUnconditional())
4079 continue;
4081 if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4082 LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4083 return true;
4084 }
4086 return false;
4087 }
4089 /// isNecessaryCond - Test whether the given CondValue value is a condition
4090 /// which is at least as strict as the one described by Pred, LHS, and RHS.
4091 bool ScalarEvolution::isNecessaryCond(Value *CondValue,
4092 ICmpInst::Predicate Pred,
4093 const SCEV *LHS, const SCEV *RHS,
4095 // Recursively handle And and Or conditions.
4096 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4097 if (BO->getOpcode() == Instruction::And) {
4098 if (!Inverse)
4099 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4100 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4101 } else if (BO->getOpcode() == Instruction::Or) {
4102 if (Inverse)
4103 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4104 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4108 ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4109 if (!ICI) return false;
4111 // Now that we found a conditional branch that dominates the loop, check to
4112 // see if it is the comparison we are looking for.
4113 Value *PreCondLHS = ICI->getOperand(0);
4114 Value *PreCondRHS = ICI->getOperand(1);
4115 ICmpInst::Predicate Cond;
4116 if (Inverse)
4117 Cond = ICI->getInversePredicate();
4118 else
4119 Cond = ICI->getPredicate();
4121 if (Cond == Pred)
4122 ; // An exact match.
4123 else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
4124 ; // The actual condition is beyond sufficient.
4125 else
4126 // Check a few special cases.
4127 switch (Cond) {
4128 case ICmpInst::ICMP_UGT:
4129 if (Pred == ICmpInst::ICMP_ULT) {
4130 std::swap(PreCondLHS, PreCondRHS);
4131 Cond = ICmpInst::ICMP_ULT;
4132 break;
4133 }
4134 return false;
4135 case ICmpInst::ICMP_SGT:
4136 if (Pred == ICmpInst::ICMP_SLT) {
4137 std::swap(PreCondLHS, PreCondRHS);
4138 Cond = ICmpInst::ICMP_SLT;
4139 break;
4140 }
4141 return false;
4142 case ICmpInst::ICMP_NE:
4143 // Expressions like (x >u 0) are often canonicalized to (x != 0),
4144 // so check for this case by checking if the NE is comparing against
4145 // a minimum or maximum constant.
4146 if (!ICmpInst::isTrueWhenEqual(Pred))
4147 if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
4148 const APInt &A = CI->getValue();
4149 switch (Pred) {
4150 case ICmpInst::ICMP_SLT:
4151 if (A.isMaxSignedValue()) break;
4152 return false;
4153 case ICmpInst::ICMP_SGT:
4154 if (A.isMinSignedValue()) break;
4155 return false;
4156 case ICmpInst::ICMP_ULT:
4157 if (A.isMaxValue()) break;
4158 return false;
4159 case ICmpInst::ICMP_UGT:
4160 if (A.isMinValue()) break;
4161 return false;
4162 default:
4163 return false;
4164 }
4165 Cond = ICmpInst::ICMP_NE;
4166 // NE is symmetric but the original comparison may not be. Swap
4167 // the operands if necessary so that they match below.
4168 if (isa<SCEVConstant>(LHS))
4169 std::swap(PreCondLHS, PreCondRHS);
4170 break;
4171 }
4172 return false;
4173 default:
4174 // We weren't able to reconcile the condition.
4175 return false;
4176 }
4178 if (!PreCondLHS->getType()->isInteger()) return false;
4180 const SCEV *PreCondLHSSCEV = getSCEV(PreCondLHS);
4181 const SCEV *PreCondRHSSCEV = getSCEV(PreCondRHS);
4182 return (HasSameValue(LHS, PreCondLHSSCEV) &&
4183 HasSameValue(RHS, PreCondRHSSCEV)) ||
4184 (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
4185 HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)));
4188 /// getBECount - Subtract the end and start values and divide by the step,
4189 /// rounding up, to get the number of times the backedge is executed. Return
4190 /// CouldNotCompute if an intermediate computation overflows.
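///
/// That is, BECount = (End - Start + (Step - 1)) /u Step, with the addition
/// checked for unsigned wraparound in a type one bit wider. For example,
/// Start = 0, End = 10, Step = 3 gives (10 + 2) /u 3 = 4 backedge
/// executions (the induction variable visits 0, 3, 6 and 9).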
4191 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4192 const SCEV *End,
4193 const SCEV *Step) {
4194 const Type *Ty = Start->getType();
4195 const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4196 const SCEV *Diff = getMinusSCEV(End, Start);
4197 const SCEV *RoundUp = getAddExpr(Step, NegOne);
4199 // Add an adjustment to the difference between End and Start so that
4200 // the division will effectively round up.
4201 const SCEV *Add = getAddExpr(Diff, RoundUp);
4203 // Check Add for unsigned overflow.
4204 // TODO: More sophisticated things could be done here.
4205 const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
4206 const SCEV *OperandExtendedAdd =
4207 getAddExpr(getZeroExtendExpr(Diff, WideTy),
4208 getZeroExtendExpr(RoundUp, WideTy));
4209 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4210 return getCouldNotCompute();
4212 return getUDivExpr(Add, Step);
4215 /// HowManyLessThans - Return the number of times a backedge containing the
4216 /// specified less-than comparison will execute. If not computable, return
4217 /// CouldNotCompute.
4218 ScalarEvolution::BackedgeTakenInfo
4219 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4220 const Loop *L, bool isSigned) {
4221 // Only handle: "ADDREC < LoopInvariant".
4222 if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4224 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4225 if (!AddRec || AddRec->getLoop() != L)
4226 return getCouldNotCompute();
4228 if (AddRec->isAffine()) {
4229 // FORNOW: We only support unit strides.
4230 unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4231 const SCEV *Step = AddRec->getStepRecurrence(*this);
4233 // TODO: handle non-constant strides.
4234 const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4235 if (!CStep || CStep->isZero())
4236 return getCouldNotCompute();
4237 if (CStep->isOne()) {
4238 // With unit stride, the iteration never steps past the limit value.
4239 } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4240 if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4241 // Test whether a positive iteration can step past the limit
4242 // value and past the maximum value for its type in a single step.
4243 if (isSigned) {
4244 APInt Max = APInt::getSignedMaxValue(BitWidth);
4245 if ((Max - CStep->getValue()->getValue())
4246 .slt(CLimit->getValue()->getValue()))
4247 return getCouldNotCompute();
4248 } else {
4249 APInt Max = APInt::getMaxValue(BitWidth);
4250 if ((Max - CStep->getValue()->getValue())
4251 .ult(CLimit->getValue()->getValue()))
4252 return getCouldNotCompute();
4253 }
4254 } else
4255 // TODO: handle non-constant limit values below.
4256 return getCouldNotCompute();
4257 } else
4258 // TODO: handle negative strides below.
4259 return getCouldNotCompute();
4261 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4262 // m. So, we count the number of iterations in which {n,+,s} < m is true.
4263 // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4264 // treat m-n as signed nor unsigned due to overflow possibility.
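//
// E.g. (sketch) for {0,+,1} <s n: if entry is guarded by a 0 <s n test the
// count is exactly n; otherwise End becomes smax(n, 0) below, reflecting
// that a loop whose test already fails on entry takes the backedge zero
// times.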
4266 // First, we get the value of the LHS in the first iteration: n
4267 const SCEV *Start = AddRec->getOperand(0);
4269 // Determine the minimum constant start value.
4270 const SCEV *MinStart = isa<SCEVConstant>(Start) ? Start :
4271 getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
4272 APInt::getMinValue(BitWidth));
4274 // If we know that the condition is true in order to enter the loop,
4275 // then we know that it will run exactly (m-n)/s times. Otherwise, we
4276 // only know that it will execute (max(m,n)-n)/s times. In both cases,
4277 // the division must round up.
4278 const SCEV *End = RHS;
4279 if (!isLoopGuardedByCond(L,
4280 isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
4281 getMinusSCEV(Start, Step), RHS))
4282 End = isSigned ? getSMaxExpr(RHS, Start)
4283 : getUMaxExpr(RHS, Start);
4285 // Determine the maximum constant end value.
4286 const SCEV *MaxEnd =
4287 isa<SCEVConstant>(End) ? End :
4288 getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
4289 .ashr(GetMinSignBits(End) - 1) :
4290 APInt::getMaxValue(BitWidth)
4291 .lshr(GetMinLeadingZeros(End)));
4293 // Finally, we subtract these two values and divide, rounding up, to get
4294 // the number of times the backedge is executed.
4295 const SCEV *BECount = getBECount(Start, End, Step);
4297 // The maximum backedge count is similar, except using the minimum start
4298 // value and the maximum end value.
4299 const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4301 return BackedgeTakenInfo(BECount, MaxBECount);
4302 }
4304 return getCouldNotCompute();
4305 }
4307 /// getNumIterationsInRange - Return the number of iterations of this loop that
4308 /// produce values in the specified constant range. Another way of looking at
4309 /// this is that it returns the first iteration number where the value is not
4310 /// in the range, thus computing the exit count. If the iteration count can't
4311 /// be computed, an instance of SCEVCouldNotCompute is returned.
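///
/// For example, the affine chrec {0,+,3} against the range [0,10) produces
/// the values 0, 3, 6, 9, 12, ...; iteration 4 is the first whose value
/// (12) falls outside the range, so 4 is returned as the exit count.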
4312 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4313 ScalarEvolution &SE) const {
4314 if (Range.isFullSet()) // Infinite loop.
4315 return SE.getCouldNotCompute();
4317 // If the start is a non-zero constant, shift the range to simplify things.
4318 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4319 if (!SC->getValue()->isZero()) {
4320 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4321 Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4322 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4323 if (const SCEVAddRecExpr *ShiftedAddRec =
4324 dyn_cast<SCEVAddRecExpr>(Shifted))
4325 return ShiftedAddRec->getNumIterationsInRange(
4326 Range.subtract(SC->getValue()->getValue()), SE);
4327 // This is strange and shouldn't happen.
4328 return SE.getCouldNotCompute();
4331 // The only time we can solve this is when we have all constant indices.
4332 // Otherwise, we cannot determine the overflow conditions.
4333 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4334 if (!isa<SCEVConstant>(getOperand(i)))
4335 return SE.getCouldNotCompute();
4338 // Okay at this point we know that all elements of the chrec are constants and
4339 // that the start element is zero.
4341 // First check to see if the range contains zero. If not, the first
4342 // iteration exits.
4343 unsigned BitWidth = SE.getTypeSizeInBits(getType());
4344 if (!Range.contains(APInt(BitWidth, 0)))
4345 return SE.getIntegerSCEV(0, getType());
4347 if (isAffine()) {
4348 // If this is an affine expression then we have this situation:
4349 // Solve {0,+,A} in Range === Ax in Range
4351 // We know that zero is in the range. If A is positive then we know that
4352 // the upper value of the range must be the first possible exit value.
4353 // If A is negative then the lower of the range is the last possible loop
4354 // value. Also note that we already checked for a full range.
4355 APInt One(BitWidth,1);
4356 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4357 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4359 // The exit value should be (End+A)/A.
4360 APInt ExitVal = (End + A).udiv(A);
4361 ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
4363 // Evaluate at the exit value. If we really did fall out of the valid
4364 // range, then we computed our trip count, otherwise wrap around or other
4365 // things must have happened.
4366 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4367 if (Range.contains(Val->getValue()))
4368 return SE.getCouldNotCompute(); // Something strange happened
4370 // Ensure that the previous value is in the range. This is a sanity check.
4371 assert(Range.contains(
4372 EvaluateConstantChrecAtConstant(this,
4373 SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
4374 "Linear scev computation is off in a bad way!");
4375 return SE.getConstant(ExitValue);
4376 } else if (isQuadratic()) {
4377 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4378 // quadratic equation to solve it. To do this, we must frame our problem in
4379 // terms of figuring out when zero is crossed, instead of when
4380 // Range.getUpper() is crossed.
4381 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4382 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4383 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4385 // Next, solve the constructed addrec
4386 std::pair<const SCEV *,const SCEV *> Roots =
4387 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4388 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4389 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4390 if (R1) {
4391 // Pick the smallest positive root value.
4392 if (ConstantInt *CB =
4393 dyn_cast<ConstantInt>(
4394 SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
4395 R1->getValue(), R2->getValue()))) {
4396 if (CB->getZExtValue() == false)
4397 std::swap(R1, R2); // R1 is the minimum root now.
4399 // Make sure the root is not off by one. The returned iteration should
4400 // not be in the range, but the previous one should be. When solving
4401 // for "X*X < 5", for example, we should not return a root of 2.
4402 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4403 R1->getValue(),
4404 SE);
4405 if (Range.contains(R1Val->getValue())) {
4406 // The next iteration must be out of the range...
4407 ConstantInt *NextVal =
4408 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
4410 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4411 if (!Range.contains(R1Val->getValue()))
4412 return SE.getConstant(NextVal);
4413 return SE.getCouldNotCompute(); // Something strange happened
4416 // If R1 was not in the range, then it is a good return value. Make
4417 // sure that R1-1 WAS in the range though, just in case.
4418 ConstantInt *NextVal =
4419 SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
4420 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4421 if (Range.contains(R1Val->getValue()))
4422 return R1;
4423 return SE.getCouldNotCompute(); // Something strange happened
4428 return SE.getCouldNotCompute();
4433 //===----------------------------------------------------------------------===//
4434 // SCEVCallbackVH Class Implementation
4435 //===----------------------------------------------------------------------===//
4437 void ScalarEvolution::SCEVCallbackVH::deleted() {
4438 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4439 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4440 SE->ConstantEvolutionLoopExitValue.erase(PN);
4441 if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4442 SE->ValuesAtScopes.erase(I);
4443 SE->Scalars.erase(getValPtr());
4444 // this now dangles!
4447 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4448 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4450 // Forget all the expressions associated with users of the old value,
4451 // so that future queries will recompute the expressions using the new
4452 // value.
4453 SmallVector<User *, 16> Worklist;
4454 Value *Old = getValPtr();
4455 bool DeleteOld = false;
4456 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4457 UI != UE; ++UI)
4458 Worklist.push_back(*UI);
4459 while (!Worklist.empty()) {
4460 User *U = Worklist.pop_back_val();
4461 // Deleting the Old value will cause this to dangle. Postpone
4462 // that until everything else is done.
4463 if (U == Old) {
4464 DeleteOld = true;
4465 continue;
4466 }
4467 if (PHINode *PN = dyn_cast<PHINode>(U))
4468 SE->ConstantEvolutionLoopExitValue.erase(PN);
4469 if (Instruction *I = dyn_cast<Instruction>(U))
4470 SE->ValuesAtScopes.erase(I);
4471 if (SE->Scalars.erase(U))
4472 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4473 UI != UE; ++UI)
4474 Worklist.push_back(*UI);
4475 }
4476 if (DeleteOld) {
4477 if (PHINode *PN = dyn_cast<PHINode>(Old))
4478 SE->ConstantEvolutionLoopExitValue.erase(PN);
4479 if (Instruction *I = dyn_cast<Instruction>(Old))
4480 SE->ValuesAtScopes.erase(I);
4481 SE->Scalars.erase(Old);
4482 // this now dangles!
4487 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4488 : CallbackVH(V), SE(se) {}
4490 //===----------------------------------------------------------------------===//
4491 // ScalarEvolution Class Implementation
4492 //===----------------------------------------------------------------------===//
4494 ScalarEvolution::ScalarEvolution()
4495 : FunctionPass(&ID) {
4498 bool ScalarEvolution::runOnFunction(Function &F) {
4499 this->F = &F;
4500 LI = &getAnalysis<LoopInfo>();
4501 TD = getAnalysisIfAvailable<TargetData>();
4502 return false;
4503 }
4505 void ScalarEvolution::releaseMemory() {
4506 Scalars.clear();
4507 BackedgeTakenCounts.clear();
4508 ConstantEvolutionLoopExitValue.clear();
4509 ValuesAtScopes.clear();
4510 UniqueSCEVs.clear();
4511 SCEVAllocator.Reset();
4514 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4515 AU.setPreservesAll();
4516 AU.addRequiredTransitive<LoopInfo>();
4519 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4520 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4523 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4524 const Loop *L) {
4525 // Print all inner loops first
4526 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4527 PrintLoopInfo(OS, SE, *I);
4529 OS << "Loop " << L->getHeader()->getName() << ": ";
4531 SmallVector<BasicBlock*, 8> ExitBlocks;
4532 L->getExitBlocks(ExitBlocks);
4533 if (ExitBlocks.size() != 1)
4534 OS << "<multiple exits> ";
4536 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4537 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4538 } else {
4539 OS << "Unpredictable backedge-taken count. ";
4540 }
4542 OS << "\n";
4543 OS << "Loop " << L->getHeader()->getName() << ": ";
4545 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4546 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4547 } else {
4548 OS << "Unpredictable max backedge-taken count. ";
4549 }
4551 OS << "\n";
4552 }
4554 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4555 // ScalarEvolution's implementation of the print method is to print
4556 // out SCEV values of all instructions that are interesting. Doing
4557 // this potentially causes it to create new SCEV objects though,
4558 // which technically conflicts with the const qualifier. This isn't
4559 // observable from outside the class though (the hasSCEV function
4560 // notwithstanding), so casting away the const isn't dangerous.
4561 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4563 OS << "Classifying expressions for: " << F->getName() << "\n";
4564 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4565 if (isSCEVable(I->getType())) {
4566 OS << *I;
4567 OS << "  -->  ";
4568 const SCEV *SV = SE.getSCEV(&*I);
4569 SV->print(OS);
4571 const Loop *L = LI->getLoopFor((*I).getParent());
4573 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
4574 if (AtUse != SV) {
4575 OS << "  -->  ";
4576 AtUse->print(OS);
4577 }
4579 if (L) {
4580 OS << "\t\t" "Exits: ";
4581 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4582 if (!ExitValue->isLoopInvariant(L)) {
4583 OS << "<<Unknown>>";
4584 } else {
4585 OS << *ExitValue;
4586 }
4587 }
4589 OS << "\n";
4590 }
4592 OS << "Determining loop execution counts for: " << F->getName() << "\n";
4593 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
4594 PrintLoopInfo(OS, &SE, *I);
4597 void ScalarEvolution::print(std::ostream &o, const Module *M) const {
4598 raw_os_ostream OS(o);
4599 print(OS, M);
4600 }