//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued and referred to by const SCEV *
// pointers.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
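//
// A small illustration of the representation (the loop and names here are
// hypothetical): for "for (i = 0; i < n; ++i) use(4*i + 7);", the induction
// variable i is the recurrence {0,+,1}<loop>, the index expression 4*i+7
// folds to the recurrence {7,+,4}<loop>, and a value the analysis cannot
// classify (e.g. a load) is wrapped in a SCEVUnknown node.
//
//===----------------------------------------------------------------------===//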
#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
#include <map>
using namespace llvm;
STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));
static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

void SCEVCouldNotCompute::Profile(FoldingSetNodeID &ID) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                       const SCEV *Conc,
                                                       ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

void SCEVConstant::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}
SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV *op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

void SCEVCastExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                       const SCEV *Conc,
                                                       ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        LLVM_UNREACHABLE("Unknown commutative expr!");
    }
  }
  return this;
}
void SCEVNAryExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

void SCEVUDivExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

void SCEVAddRecExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
}
const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}
void SCEVUnknown::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//
namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      LLVM_UNREACHABLE("Unknown SCEV kind!");
      return false;
    }
  };
}
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
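///
/// As an illustration (the values here are hypothetical): if %a and %b are
/// distinct SCEVUnknowns, the operand list (%b, 2, %a, %a) is regrouped so
/// the constant sorts first and the duplicate %a entries become adjacent:
/// (2, %a, %a, %b).
///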
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // machine width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
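  //
  // As a concrete illustration (not a special case in the code below): for
  // K = 3 and W = 32, K! = 6, T = 1, and K!/2^T = 3.  The product
  // It*(It-1)*(It-2) is computed at W+T = 33 bits, shifted right by T = 1,
  // truncated to 32 bits, and multiplied by 0xAAAAAAAB, the multiplicative
  // inverse of 3 mod 2^32, yielding BC(It, 3) modulo 2^32.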
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
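///
/// For example (illustrative): {5,+,2,+,2} evaluates at iteration It to
/// 5*BC(It,0) + 2*BC(It,1) + 2*BC(It,2) = 5 + 2*It + It*(It-1)
/// = It^2 + It + 5.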
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
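  // (Illustration for that example: {0,+,1}<i8> with a max backedge-taken
  // count of 99 never exceeds 99, so it never wraps i8, and the zero
  // extension can be folded to the addrec {0,+,1} in the wider type.)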
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29)))  + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}
namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}
/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif
  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];
  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }
  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }
  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }
  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }
  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }
  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }
  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
  new (S) SCEVAddExpr(Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif
  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];
  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }
  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
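          // (Derivation, for reference: with F(n+1) = F(n)+B and
          // G(n+1) = G(n)+D, the step of F*G is
          // F(n+1)*G(n+1) - F(n)*G(n) = F*D + G*B + B*D.)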
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }
  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
  new (S) SCEVMulExpr(Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
1605 /// getUDivExpr - Get a canonical multiply expression, or something simpler if
1607 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1609 assert(getEffectiveSCEVType(LHS->getType()) ==
1610 getEffectiveSCEVType(RHS->getType()) &&
1611 "SCEVUDivExpr operand types don't match!");
1613 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1614 if (RHSC->getValue()->equalsInt(1))
1615 return LHS; // X udiv 1 --> X
1616 if (RHSC->isZero())
1617 return getIntegerSCEV(0, LHS->getType()); // value is undefined
1619 // Determine if the division can be folded into the operands of
1620 // related operations.
1621 // TODO: Generalize this to non-constants by using known-bits information.
1622 const Type *Ty = LHS->getType();
1623 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1624 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1625 // For non-power-of-two values, effectively round the value up to the
1626 // nearest power of two.
1627 if (!RHSC->getValue()->getValue().isPowerOf2())
1628 ++MaxShiftAmt;
1629 const IntegerType *ExtTy =
1630 IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1631 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
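// For instance, {0,+,8}/4 folds to {0,+,2}: the values 0, 8, 16, ...
// divide evenly to 0, 2, 4, ... The zero-extend comparison below guards
// against cases where the division and the recurrence don't commute.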
1632 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1633 if (const SCEVConstant *Step =
1634 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1635 if (!Step->getValue()->getValue()
1636 .urem(RHSC->getValue()->getValue()) &&
1637 getZeroExtendExpr(AR, ExtTy) ==
1638 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1639 getZeroExtendExpr(Step, ExtTy),
1640 AR->getLoop())) {
1641 SmallVector<const SCEV *, 4> Operands;
1642 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1643 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1644 return getAddRecExpr(Operands, AR->getLoop());
1645 }
1646 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1647 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1648 SmallVector<const SCEV *, 4> Operands;
1649 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1650 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1651 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1652 // Find an operand that's safely divisible.
1653 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1654 const SCEV *Op = M->getOperand(i);
1655 const SCEV *Div = getUDivExpr(Op, RHSC);
1656 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1657 const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1658 Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1659 MOperands.end());
1660 Operands[i] = Div;
1661 return getMulExpr(Operands);
1662 }
1663 }
1664 }
1665 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1666 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1667 SmallVector<const SCEV *, 4> Operands;
1668 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1669 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1670 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1671 Operands.clear();
1672 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1673 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1674 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1675 break;
1676 Operands.push_back(Op);
1677 }
1678 if (Operands.size() == A->getNumOperands())
1679 return getAddExpr(Operands);
1680 }
1681 }
1683 // Fold if both operands are constant.
1684 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1685 Constant *LHSCV = LHSC->getValue();
1686 Constant *RHSCV = RHSC->getValue();
1687 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1688 RHSCV)));
1689 }
1690 }
1692 FoldingSetNodeID ID;
1693 ID.AddInteger(scUDivExpr);
1694 ID.AddPointer(LHS);
1695 ID.AddPointer(RHS);
1696 void *IP = 0;
1697 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1698 SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1699 new (S) SCEVUDivExpr(LHS, RHS);
1700 UniqueSCEVs.InsertNode(S, IP);
1701 return S;
1702 }
1705 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1706 /// Simplify the expression as much as possible.
1707 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1708 const SCEV *Step, const Loop *L) {
1709 SmallVector<const SCEV *, 4> Operands;
1710 Operands.push_back(Start);
1711 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1712 if (StepChrec->getLoop() == L) {
1713 Operands.insert(Operands.end(), StepChrec->op_begin(),
1714 StepChrec->op_end());
1715 return getAddRecExpr(Operands, L);
1716 }
1718 Operands.push_back(Step);
1719 return getAddRecExpr(Operands, L);
1720 }
1722 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1723 /// Simplify the expression as much as possible.
1724 const SCEV *
1725 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1726 const Loop *L) {
1727 if (Operands.size() == 1) return Operands[0];
1728 #ifndef NDEBUG
1729 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1730 assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1731 getEffectiveSCEVType(Operands[0]->getType()) &&
1732 "SCEVAddRecExpr operand types don't match!");
1735 if (Operands.back()->isZero()) {
1736 Operands.pop_back();
1737 return getAddRecExpr(Operands, L); // {X,+,0} --> X
1738 }
1740 // Canonicalize nested AddRecs by nesting them in order of loop depth.
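// For example, when L is an outer loop of L2, {{A,+,B}<L2>,+,C}<L> is
// rewritten here as {{A,+,C}<L>,+,B}<L2>, provided each operand remains
// invariant in the loop it ends up in (verified below).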
1741 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1742 const Loop* NestedLoop = NestedAR->getLoop();
1743 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1744 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1745 NestedAR->op_end());
1746 Operands[0] = NestedAR->getStart();
1747 // AddRecs require their operands be loop-invariant with respect to their
1748 // loops. Don't perform this transformation if it would break this
1749 // invariant.
1750 bool AllInvariant = true;
1751 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1752 if (!Operands[i]->isLoopInvariant(L)) {
1753 AllInvariant = false;
1754 break;
1755 }
1756 if (AllInvariant) {
1757 NestedOperands[0] = getAddRecExpr(Operands, L);
1758 AllInvariant = true;
1759 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1760 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1761 AllInvariant = false;
1762 break;
1763 }
1764 if (AllInvariant)
1765 // Ok, both add recurrences are valid after the transformation.
1766 return getAddRecExpr(NestedOperands, NestedLoop);
1767 }
1768 // Reset Operands to its original state.
1769 Operands[0] = NestedAR;
1770 }
1771 }
1773 FoldingSetNodeID ID;
1774 ID.AddInteger(scAddRecExpr);
1775 ID.AddInteger(Operands.size());
1776 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1777 ID.AddPointer(Operands[i]);
1778 ID.AddPointer(L);
1779 void *IP = 0;
1780 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1781 SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1782 new (S) SCEVAddRecExpr(Operands, L);
1783 UniqueSCEVs.InsertNode(S, IP);
1784 return S;
1785 }
1787 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1788 const SCEV *RHS) {
1789 SmallVector<const SCEV *, 2> Ops;
1790 Ops.push_back(LHS);
1791 Ops.push_back(RHS);
1792 return getSMaxExpr(Ops);
1793 }
1795 const SCEV *
1796 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1797 assert(!Ops.empty() && "Cannot get empty smax!");
1798 if (Ops.size() == 1) return Ops[0];
1799 #ifndef NDEBUG
1800 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1801 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1802 getEffectiveSCEVType(Ops[0]->getType()) &&
1803 "SCEVSMaxExpr operand types don't match!");
1806 // Sort by complexity, this groups all similar expression types together.
1807 GroupByComplexity(Ops, LI);
1809 // If there are any constants, fold them together.
1810 unsigned Idx = 0;
1811 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1812 ++Idx;
1813 assert(Idx < Ops.size());
1814 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1815 // We found two constants, fold them together!
1816 ConstantInt *Fold = ConstantInt::get(
1817 APIntOps::smax(LHSC->getValue()->getValue(),
1818 RHSC->getValue()->getValue()));
1819 Ops[0] = getConstant(Fold);
1820 Ops.erase(Ops.begin()+1); // Erase the folded element
1821 if (Ops.size() == 1) return Ops[0];
1822 LHSC = cast<SCEVConstant>(Ops[0]);
1823 }
1825 // If we are left with a constant minimum-int, strip it off.
1826 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1827 Ops.erase(Ops.begin());
1828 --Idx;
1829 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1830 // If we have an smax with a constant maximum-int, it will always be
1831 // the maximum.
1832 return Ops[0];
1833 }
1834 }
1836 if (Ops.size() == 1) return Ops[0];
1838 // Find the first SMax
1839 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1840 ++Idx;
1842 // Check to see if one of the operands is an SMax. If so, expand its operands
1843 // onto our operand list, and recurse to simplify.
1844 if (Idx < Ops.size()) {
1845 bool DeletedSMax = false;
1846 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1847 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1848 Ops.erase(Ops.begin()+Idx);
1849 DeletedSMax = true;
1850 }
1852 if (DeletedSMax)
1853 return getSMaxExpr(Ops);
1854 }
1856 // Okay, check to see if the same value occurs in the operand list twice. If
1857 // so, delete one. Since we sorted the list, these values are required to
1858 // be next to each other.
1859 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1860 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
1861 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1862 --i; --e;
1863 }
1865 if (Ops.size() == 1) return Ops[0];
1867 assert(!Ops.empty() && "Reduced smax down to nothing!");
1869 // Okay, it looks like we really DO need an smax expr. Check to see if we
1870 // already have one, otherwise create a new one.
1871 FoldingSetNodeID ID;
1872 ID.AddInteger(scSMaxExpr);
1873 ID.AddInteger(Ops.size());
1874 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1875 ID.AddPointer(Ops[i]);
1876 void *IP = 0;
1877 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1878 SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1879 new (S) SCEVSMaxExpr(Ops);
1880 UniqueSCEVs.InsertNode(S, IP);
1881 return S;
1882 }
1884 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1885 const SCEV *RHS) {
1886 SmallVector<const SCEV *, 2> Ops;
1887 Ops.push_back(LHS);
1888 Ops.push_back(RHS);
1889 return getUMaxExpr(Ops);
1890 }
1892 const SCEV *
1893 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1894 assert(!Ops.empty() && "Cannot get empty umax!");
1895 if (Ops.size() == 1) return Ops[0];
1896 #ifndef NDEBUG
1897 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1898 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1899 getEffectiveSCEVType(Ops[0]->getType()) &&
1900 "SCEVUMaxExpr operand types don't match!");
1903 // Sort by complexity, this groups all similar expression types together.
1904 GroupByComplexity(Ops, LI);
1906 // If there are any constants, fold them together.
1907 unsigned Idx = 0;
1908 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1909 ++Idx;
1910 assert(Idx < Ops.size());
1911 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1912 // We found two constants, fold them together!
1913 ConstantInt *Fold = ConstantInt::get(
1914 APIntOps::umax(LHSC->getValue()->getValue(),
1915 RHSC->getValue()->getValue()));
1916 Ops[0] = getConstant(Fold);
1917 Ops.erase(Ops.begin()+1); // Erase the folded element
1918 if (Ops.size() == 1) return Ops[0];
1919 LHSC = cast<SCEVConstant>(Ops[0]);
1920 }
1922 // If we are left with a constant minimum-int, strip it off.
1923 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1924 Ops.erase(Ops.begin());
1925 --Idx;
1926 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1927 // If we have an umax with a constant maximum-int, it will always be
1928 // the maximum.
1929 return Ops[0];
1930 }
1931 }
1933 if (Ops.size() == 1) return Ops[0];
1935 // Find the first UMax
1936 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1937 ++Idx;
1939 // Check to see if one of the operands is a UMax. If so, expand its operands
1940 // onto our operand list, and recurse to simplify.
1941 if (Idx < Ops.size()) {
1942 bool DeletedUMax = false;
1943 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1944 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1945 Ops.erase(Ops.begin()+Idx);
1946 DeletedUMax = true;
1947 }
1949 if (DeletedUMax)
1950 return getUMaxExpr(Ops);
1951 }
1953 // Okay, check to see if the same value occurs in the operand list twice. If
1954 // so, delete one. Since we sorted the list, these values are required to
1955 // be next to each other.
1956 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1957 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
1958 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1959 --i; --e;
1960 }
1962 if (Ops.size() == 1) return Ops[0];
1964 assert(!Ops.empty() && "Reduced umax down to nothing!");
1966 // Okay, it looks like we really DO need a umax expr. Check to see if we
1967 // already have one, otherwise create a new one.
1968 FoldingSetNodeID ID;
1969 ID.AddInteger(scUMaxExpr);
1970 ID.AddInteger(Ops.size());
1971 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1972 ID.AddPointer(Ops[i]);
1973 void *IP = 0;
1974 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1975 SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
1976 new (S) SCEVUMaxExpr(Ops);
1977 UniqueSCEVs.InsertNode(S, IP);
1978 return S;
1979 }
1981 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1982 const SCEV *RHS) {
1983 // ~smax(~x, ~y) == smin(x, y).
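// (~z == -1 - z reverses the signed order, so complementing the smax of
// the complements yields the signed minimum.)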
1984 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1985 }
1987 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1988 const SCEV *RHS) {
1989 // ~umax(~x, ~y) == umin(x, y)
1990 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1991 }
1993 const SCEV *ScalarEvolution::getUnknown(Value *V) {
1994 // Don't attempt to do anything other than create a SCEVUnknown object
1995 // here. createSCEV only calls getUnknown after checking for all other
1996 // interesting possibilities, and any other code that calls getUnknown
1997 // is doing so in order to hide a value from SCEV canonicalization.
1999 FoldingSetNodeID ID;
2000 ID.AddInteger(scUnknown);
2001 ID.AddPointer(V);
2002 void *IP = 0;
2003 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2004 SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2005 new (S) SCEVUnknown(V);
2006 UniqueSCEVs.InsertNode(S, IP);
2007 return S;
2008 }
2010 //===----------------------------------------------------------------------===//
2011 // Basic SCEV Analysis and PHI Idiom Recognition Code
2014 /// isSCEVable - Test if values of the given type are analyzable within
2015 /// the SCEV framework. This primarily includes integer types, and it
2016 /// can optionally include pointer types if the ScalarEvolution class
2017 /// has access to target-specific information.
2018 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2019 // Integers are always SCEVable.
2020 if (Ty->isInteger())
2021 return true;
2023 // Pointers are SCEVable if TargetData information is available
2024 // to provide pointer size information.
2025 if (isa<PointerType>(Ty))
2026 return TD != NULL;
2028 // Otherwise it's not SCEVable.
2029 return false;
2030 }
2032 /// getTypeSizeInBits - Return the size in bits of the specified type,
2033 /// for which isSCEVable must return true.
2034 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2035 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2037 // If we have a TargetData, use it!
2038 if (TD)
2039 return TD->getTypeSizeInBits(Ty);
2041 // Otherwise, we support only integer types.
2042 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2043 return Ty->getPrimitiveSizeInBits();
2044 }
2046 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2047 /// the given type and which represents how SCEV will treat the given
2048 /// type, for which isSCEVable must return true. For pointer types,
2049 /// this is the pointer-sized integer type.
2050 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2051 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2053 if (Ty->isInteger())
2054 return Ty;
2056 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2057 return TD->getIntPtrType();
2058 }
2060 const SCEV *ScalarEvolution::getCouldNotCompute() {
2061 return &CouldNotCompute;
2062 }
2064 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2065 /// expression and create a new one.
2066 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2067 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2069 std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2070 if (I != Scalars.end()) return I->second;
2071 const SCEV *S = createSCEV(V);
2072 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2073 return S;
2074 }
2076 /// getIntegerSCEV - Given a SCEVable type, create a constant for the
2077 /// specified signed integer value and return a SCEV for the constant.
2078 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2079 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2080 return getConstant(ConstantInt::get(ITy, Val));
2081 }
2083 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2085 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2086 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2087 return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2089 const Type *Ty = V->getType();
2090 Ty = getEffectiveSCEVType(Ty);
2091 return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
2092 }
2094 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2095 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2096 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2097 return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2099 const Type *Ty = V->getType();
2100 Ty = getEffectiveSCEVType(Ty);
2101 const SCEV *AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
2102 return getMinusSCEV(AllOnes, V);
2103 }
2105 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2107 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2108 const SCEV *RHS) {
2109 // X - Y --> X + -Y
2110 return getAddExpr(LHS, getNegativeSCEV(RHS));
2111 }
2113 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2114 /// input value to the specified type. If the type must be extended, it is zero
2115 /// extended.
2116 const SCEV *
2117 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2118 const Type *Ty) {
2119 const Type *SrcTy = V->getType();
2120 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2121 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2122 "Cannot truncate or zero extend with non-integer arguments!");
2123 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2124 return V; // No conversion
2125 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2126 return getTruncateExpr(V, Ty);
2127 return getZeroExtendExpr(V, Ty);
2128 }
2130 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2131 /// input value to the specified type. If the type must be extended, it is sign
2132 /// extended.
2133 const SCEV *
2134 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2135 const Type *Ty) {
2136 const Type *SrcTy = V->getType();
2137 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2138 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2139 "Cannot truncate or zero extend with non-integer arguments!");
2140 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2141 return V; // No conversion
2142 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2143 return getTruncateExpr(V, Ty);
2144 return getSignExtendExpr(V, Ty);
2145 }
2147 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2148 /// input value to the specified type. If the type must be extended, it is zero
2149 /// extended. The conversion must not be narrowing.
2150 const SCEV *
2151 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2152 const Type *SrcTy = V->getType();
2153 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2154 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2155 "Cannot noop or zero extend with non-integer arguments!");
2156 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2157 "getNoopOrZeroExtend cannot truncate!");
2158 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2159 return V; // No conversion
2160 return getZeroExtendExpr(V, Ty);
2161 }
2163 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2164 /// input value to the specified type. If the type must be extended, it is sign
2165 /// extended. The conversion must not be narrowing.
2166 const SCEV *
2167 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2168 const Type *SrcTy = V->getType();
2169 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2170 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2171 "Cannot noop or sign extend with non-integer arguments!");
2172 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2173 "getNoopOrSignExtend cannot truncate!");
2174 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2175 return V; // No conversion
2176 return getSignExtendExpr(V, Ty);
2177 }
2179 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2180 /// the input value to the specified type. If the type must be extended,
2181 /// it is extended with unspecified bits. The conversion must not be
2182 /// narrowing.
2183 const SCEV *
2184 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2185 const Type *SrcTy = V->getType();
2186 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2187 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2188 "Cannot noop or any extend with non-integer arguments!");
2189 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2190 "getNoopOrAnyExtend cannot truncate!");
2191 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2192 return V; // No conversion
2193 return getAnyExtendExpr(V, Ty);
2194 }
2196 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2197 /// input value to the specified type. The conversion must not be widening.
2198 const SCEV *
2199 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2200 const Type *SrcTy = V->getType();
2201 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2202 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2203 "Cannot truncate or noop with non-integer arguments!");
2204 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2205 "getTruncateOrNoop cannot extend!");
2206 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2207 return V; // No conversion
2208 return getTruncateExpr(V, Ty);
2209 }
2211 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2212 /// the types using zero-extension, and then perform a umax operation
2213 /// with them.
2214 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2215 const SCEV *RHS) {
2216 const SCEV *PromotedLHS = LHS;
2217 const SCEV *PromotedRHS = RHS;
2219 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2220 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2221 else
2222 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2224 return getUMaxExpr(PromotedLHS, PromotedRHS);
2225 }
2227 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2228 /// the types using zero-extension, and then perform a umin operation
2229 /// with them.
2230 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2231 const SCEV *RHS) {
2232 const SCEV *PromotedLHS = LHS;
2233 const SCEV *PromotedRHS = RHS;
2235 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2236 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2237 else
2238 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2240 return getUMinExpr(PromotedLHS, PromotedRHS);
2241 }
2243 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2244 /// the specified instruction and replaces any references to the symbolic value
2245 /// SymName with the specified value. This is used during PHI resolution.
2246 void
2247 ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2248 const SCEV *SymName,
2249 const SCEV *NewVal) {
2250 std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2251 Scalars.find(SCEVCallbackVH(I, this));
2252 if (SI == Scalars.end()) return;
2254 const SCEV *NV =
2255 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2256 if (NV == SI->second) return; // No change.
2258 SI->second = NV; // Update the scalars map!
2260 // Any instruction values that use this instruction might also need to be
2261 // updated!
2262 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2263 UI != E; ++UI)
2264 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2265 }
2267 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2268 /// a loop header, making it a potential recurrence, or it doesn't.
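/// For example, a canonical induction variable such as
///   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///   %i.next = add i32 %i, 1
/// is recognized below as the affine recurrence {0,+,1} for its loop.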
2270 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2271 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
2272 if (const Loop *L = LI->getLoopFor(PN->getParent()))
2273 if (L->getHeader() == PN->getParent()) {
2274 // If it lives in the loop header, it has two incoming values, one
2275 // from outside the loop, and one from inside.
2276 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2277 unsigned BackEdge = IncomingEdge^1;
2279 // While we are analyzing this PHI node, handle its value symbolically.
2280 const SCEV *SymbolicName = getUnknown(PN);
2281 assert(Scalars.find(PN) == Scalars.end() &&
2282 "PHI node already processed?");
2283 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2285 // Using this symbolic name for the PHI, analyze the value coming around
2286 // the back-edge.
2287 const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2289 // NOTE: If BEValue is loop invariant, we know that the PHI node just
2290 // has a special value for the first iteration of the loop.
2292 // If the value coming around the backedge is an add with the symbolic
2293 // value we just inserted, then we found a simple induction variable!
2294 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2295 // If there is a single occurrence of the symbolic value, replace it
2296 // with a recurrence.
2297 unsigned FoundIndex = Add->getNumOperands();
2298 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2299 if (Add->getOperand(i) == SymbolicName)
2300 if (FoundIndex == e) {
2301 FoundIndex = i;
2302 break;
2303 }
2305 if (FoundIndex != Add->getNumOperands()) {
2306 // Create an add with everything but the specified operand.
2307 SmallVector<const SCEV *, 8> Ops;
2308 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2309 if (i != FoundIndex)
2310 Ops.push_back(Add->getOperand(i));
2311 const SCEV *Accum = getAddExpr(Ops);
2313 // This is not a valid addrec if the step amount is varying each
2314 // loop iteration, but is not itself an addrec in this loop.
2315 if (Accum->isLoopInvariant(L) ||
2316 (isa<SCEVAddRecExpr>(Accum) &&
2317 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2318 const SCEV *StartVal =
2319 getSCEV(PN->getIncomingValue(IncomingEdge));
2320 const SCEV *PHISCEV =
2321 getAddRecExpr(StartVal, Accum, L);
2323 // Okay, for the entire analysis of this edge we assumed the PHI
2324 // to be symbolic. We now need to go back and update all of the
2325 // entries for the scalars that use the PHI (except for the PHI
2326 // itself) to use the new analyzed value instead of the "symbolic"
2327 // PHI value.
2328 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2329 return PHISCEV;
2330 }
2331 }
2332 } else if (const SCEVAddRecExpr *AddRec =
2333 dyn_cast<SCEVAddRecExpr>(BEValue)) {
2334 // Otherwise, this could be a loop like this:
2335 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
2336 // In this case, j = {1,+,1} and BEValue is j.
2337 // Because the other in-value of i (0) fits the evolution of BEValue
2338 // i really is an addrec evolution.
2339 if (AddRec->getLoop() == L && AddRec->isAffine()) {
2340 const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2342 // If StartVal = j.start - j.stride, we can use StartVal as the
2343 // initial value of the addrec evolution.
2344 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2345 AddRec->getOperand(1))) {
2346 const SCEV *PHISCEV =
2347 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2349 // Okay, for the entire analysis of this edge we assumed the PHI
2350 // to be symbolic. We now need to go back and update all of the
2351 // entries for the scalars that use the PHI (except for the PHI
2352 // itself) to use the new analyzed value instead of the "symbolic"
2353 // PHI value.
2354 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2355 return PHISCEV;
2356 }
2357 }
2358 }
2360 return SymbolicName;
2361 }
2363 // If it's not a loop phi, we can't handle it yet.
2364 return getUnknown(PN);
2365 }
2367 /// createNodeForGEP - Expand GEP instructions into add and multiply
2368 /// operations. This allows them to be analyzed by regular SCEV code.
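/// For example, assuming TargetData reports a 4-byte i32, the expression
/// "getelementptr [100 x i32]* %p, i64 0, i64 %i" is expanded to roughly
/// getSCEV(%p) + 4 * getSCEV(%i), computed in the pointer-sized type.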
2370 const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
2372 const Type *IntPtrTy = TD->getIntPtrType();
2373 Value *Base = GEP->getOperand(0);
2374 // Don't attempt to analyze GEPs over unsized objects.
2375 if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2376 return getUnknown(GEP);
2377 const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2378 gep_type_iterator GTI = gep_type_begin(GEP);
2379 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2380 E = GEP->op_end();
2381 I != E; ++I) {
2382 Value *Index = *I;
2383 // Compute the (potentially symbolic) offset in bytes for this index.
2384 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2385 // For a struct, add the member offset.
2386 const StructLayout &SL = *TD->getStructLayout(STy);
2387 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2388 uint64_t Offset = SL.getElementOffset(FieldNo);
2389 TotalOffset = getAddExpr(TotalOffset,
2390 getIntegerSCEV(Offset, IntPtrTy));
2391 } else {
2392 // For an array, add the element offset, explicitly scaled.
2393 const SCEV *LocalOffset = getSCEV(Index);
2394 if (!isa<PointerType>(LocalOffset->getType()))
2395 // Getelementptr indices are signed.
2396 LocalOffset = getTruncateOrSignExtend(LocalOffset,
2397 IntPtrTy);
2398 LocalOffset =
2399 getMulExpr(LocalOffset,
2400 getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2401 IntPtrTy));
2402 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2403 }
2404 }
2405 return getAddExpr(getSCEV(Base), TotalOffset);
2406 }
2408 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2409 /// guaranteed to end in (at every loop iteration). It is, at the same time,
2410 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
2411 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
2412 uint32_t
2413 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2414 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2415 return C->getValue()->getValue().countTrailingZeros();
2417 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2418 return std::min(GetMinTrailingZeros(T->getOperand()),
2419 (uint32_t)getTypeSizeInBits(T->getType()));
2421 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2422 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2423 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2424 getTypeSizeInBits(E->getType()) : OpRes;
2425 }
2427 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2428 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2429 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2430 getTypeSizeInBits(E->getType()) : OpRes;
2431 }
2433 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2434 // The result is the min of all operands results.
2435 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2436 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2437 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2438 return MinOpRes;
2439 }
2441 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2442 // The result is the sum of all operands results.
2443 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2444 uint32_t BitWidth = getTypeSizeInBits(M->getType());
2445 for (unsigned i = 1, e = M->getNumOperands();
2446 SumOpRes != BitWidth && i != e; ++i)
2447 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2448 BitWidth);
2449 return SumOpRes;
2450 }
2452 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2453 // The result is the min of all operands results.
2454 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2455 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2456 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2457 return MinOpRes;
2458 }
2460 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2461 // The result is the min of all operands results.
2462 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2463 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2464 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2465 return MinOpRes;
2466 }
2468 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2469 // The result is the min of all operands results.
2470 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2471 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2472 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2473 return MinOpRes;
2474 }
2476 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2477 // For a SCEVUnknown, ask ValueTracking.
2478 unsigned BitWidth = getTypeSizeInBits(U->getType());
2479 APInt Mask = APInt::getAllOnesValue(BitWidth);
2480 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2481 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2482 return Zeros.countTrailingOnes();
2483 }
2485 // SCEVUDivExpr
2486 return 0;
2487 }
2489 uint32_t
2490 ScalarEvolution::GetMinLeadingZeros(const SCEV *S) {
2491 // TODO: Handle other SCEV expression types here.
2493 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2494 return C->getValue()->getValue().countLeadingZeros();
2496 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
2497 // A zero-extension cast adds zero bits.
2498 return GetMinLeadingZeros(C->getOperand()) +
2499 (getTypeSizeInBits(C->getType()) -
2500 getTypeSizeInBits(C->getOperand()->getType()));
2501 }
2503 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2504 // For a SCEVUnknown, ask ValueTracking.
2505 unsigned BitWidth = getTypeSizeInBits(U->getType());
2506 APInt Mask = APInt::getAllOnesValue(BitWidth);
2507 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2508 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2509 return Zeros.countLeadingOnes();
2510 }
2512 return 0;
2513 }
2515 uint32_t
2516 ScalarEvolution::GetMinSignBits(const SCEV *S) {
2517 // TODO: Handle other SCEV expression types here.
2519 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
2520 const APInt &A = C->getValue()->getValue();
2521 return A.isNegative() ? A.countLeadingOnes() :
2522 A.countLeadingZeros();
2523 }
2525 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
2526 // A sign-extension cast adds sign bits.
2527 return GetMinSignBits(C->getOperand()) +
2528 (getTypeSizeInBits(C->getType()) -
2529 getTypeSizeInBits(C->getOperand()->getType()));
2530 }
2532 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2533 unsigned BitWidth = getTypeSizeInBits(A->getType());
2535 // Special case decrementing a value (ADD X, -1):
2536 if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0)))
2537 if (CRHS->isAllOnesValue()) {
2538 SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end());
2539 const SCEV *OtherOpsAdd = getAddExpr(OtherOps);
2540 unsigned LZ = GetMinLeadingZeros(OtherOpsAdd);
2542 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2543 // sign bits.
2544 if (LZ == BitWidth - 1)
2545 return BitWidth;
2547 // If we are subtracting one from a positive number, there is no carry
2548 // out of the result.
2549 if (LZ > 0)
2550 return GetMinSignBits(OtherOpsAdd);
2551 }
2553 // Add can have at most one carry bit. Thus we know that the output
2554 // is, at worst, one more bit than the inputs.
2555 unsigned Min = BitWidth;
2556 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2557 unsigned N = GetMinSignBits(A->getOperand(i));
2558 Min = std::min(Min, N) - 1;
2559 if (Min == 0) return 1;
2560 }
2561 return Min;
2562 }
2564 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2565 // For a SCEVUnknown, ask ValueTracking.
2566 return ComputeNumSignBits(U->getValue(), TD);
2567 }
2569 return 1;
2570 }
2572 /// createSCEV - We know that there is no SCEV for the specified value.
2573 /// Analyze the expression.
2575 const SCEV *ScalarEvolution::createSCEV(Value *V) {
2576 if (!isSCEVable(V->getType()))
2577 return getUnknown(V);
2579 unsigned Opcode = Instruction::UserOp1;
2580 if (Instruction *I = dyn_cast<Instruction>(V))
2581 Opcode = I->getOpcode();
2582 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2583 Opcode = CE->getOpcode();
2584 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2585 return getConstant(CI);
2586 else if (isa<ConstantPointerNull>(V))
2587 return getIntegerSCEV(0, V->getType());
2588 else if (isa<UndefValue>(V))
2589 return getIntegerSCEV(0, V->getType());
2590 else
2591 return getUnknown(V);
2593 User *U = cast<User>(V);
2594 switch (Opcode) {
2595 case Instruction::Add:
2596 return getAddExpr(getSCEV(U->getOperand(0)),
2597 getSCEV(U->getOperand(1)));
2598 case Instruction::Mul:
2599 return getMulExpr(getSCEV(U->getOperand(0)),
2600 getSCEV(U->getOperand(1)));
2601 case Instruction::UDiv:
2602 return getUDivExpr(getSCEV(U->getOperand(0)),
2603 getSCEV(U->getOperand(1)));
2604 case Instruction::Sub:
2605 return getMinusSCEV(getSCEV(U->getOperand(0)),
2606 getSCEV(U->getOperand(1)));
2607 case Instruction::And:
2608 // For an expression like x&255 that merely masks off the high bits,
2609 // use zext(trunc(x)) as the SCEV expression.
2610 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2611 if (CI->isNullValue())
2612 return getSCEV(U->getOperand(1));
2613 if (CI->isAllOnesValue())
2614 return getSCEV(U->getOperand(0));
2615 const APInt &A = CI->getValue();
2617 // Instcombine's ShrinkDemandedConstant may strip bits out of
2618 // constants, obscuring what would otherwise be a low-bits mask.
2619 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2620 // knew about to reconstruct a low-bits mask value.
2621 unsigned LZ = A.countLeadingZeros();
2622 unsigned BitWidth = A.getBitWidth();
2623 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2624 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2625 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2627 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2629 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2630 return
2631 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2632 IntegerType::get(BitWidth - LZ)),
2633 U->getType());
2634 }
2635 break;
2637 case Instruction::Or:
2638 // If the RHS of the Or is a constant, we may have something like:
2639 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
2640 // optimizations will transparently handle this case.
2642 // In order for this transformation to be safe, the LHS must be of the
2643 // form X*(2^n) and the Or constant must be less than 2^n.
2644 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2645 const SCEV *LHS = getSCEV(U->getOperand(0));
2646 const APInt &CIVal = CI->getValue();
2647 if (GetMinTrailingZeros(LHS) >=
2648 (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2649 return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2650 }
2651 break;
2652 case Instruction::Xor:
2653 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2654 // If the RHS of the xor is a signbit, then this is just an add.
2655 // Instcombine turns add of signbit into xor as a strength reduction step.
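// e.g., for i32: X ^ 0x80000000 == X + 0x80000000, since flipping the
// sign bit is the same as adding it (the carry falls off the top).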
2656 if (CI->getValue().isSignBit())
2657 return getAddExpr(getSCEV(U->getOperand(0)),
2658 getSCEV(U->getOperand(1)));
2660 // If the RHS of xor is -1, then this is a not operation.
2661 if (CI->isAllOnesValue())
2662 return getNotSCEV(getSCEV(U->getOperand(0)));
2664 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2665 // This is a variant of the check for xor with -1, and it handles
2666 // the case where instcombine has trimmed non-demanded bits out
2667 // of an xor with -1.
2668 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2669 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2670 if (BO->getOpcode() == Instruction::And &&
2671 LCI->getValue() == CI->getValue())
2672 if (const SCEVZeroExtendExpr *Z =
2673 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2674 const Type *UTy = U->getType();
2675 const SCEV *Z0 = Z->getOperand();
2676 const Type *Z0Ty = Z0->getType();
2677 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2679 // If C is a low-bits mask, the zero extend is serving to
2680 // mask off the high bits. Complement the operand and
2681 // re-apply the zext.
2682 if (APIntOps::isMask(Z0TySize, CI->getValue()))
2683 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2685 // If C is a single bit, it may be in the sign-bit position
2686 // before the zero-extend. In this case, represent the xor
2687 // using an add, which is equivalent, and re-apply the zext.
2688 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2689 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2690 Trunc.isSignBit())
2691 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2692 UTy);
2693 }
2694 }
2695 break;
2697 case Instruction::Shl:
2698 // Turn shift left of a constant amount into a multiply.
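// e.g., X << 3 becomes X * 8.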
2699 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2700 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2701 Constant *X = ConstantInt::get(
2702 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2703 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2704 }
2705 break;
2707 case Instruction::LShr:
2708 // Turn logical shift right of a constant into an unsigned divide.
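// e.g., X >>u 3 becomes X /u 8.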
2709 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2710 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2711 Constant *X = ConstantInt::get(
2712 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2713 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2714 }
2715 break;
2717 case Instruction::AShr:
2718 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
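// e.g., for i32: (X << 24) >>s 24 becomes sext(trunc(X to i8) to i32).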
2719 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2720 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2721 if (L->getOpcode() == Instruction::Shl &&
2722 L->getOperand(1) == U->getOperand(1)) {
2723 unsigned BitWidth = getTypeSizeInBits(U->getType());
2724 uint64_t Amt = BitWidth - CI->getZExtValue();
2725 if (Amt == BitWidth)
2726 return getSCEV(L->getOperand(0)); // shift by zero --> noop
2727 if (Amt > BitWidth)
2728 return getIntegerSCEV(0, U->getType()); // value is undefined
2729 return
2730 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2731 IntegerType::get(Amt)),
2732 U->getType());
2733 }
2734 break;
2736 case Instruction::Trunc:
2737 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2739 case Instruction::ZExt:
2740 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2742 case Instruction::SExt:
2743 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2745 case Instruction::BitCast:
2746 // BitCasts are no-op casts so we just eliminate the cast.
2747 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2748 return getSCEV(U->getOperand(0));
2749 break;
2751 case Instruction::IntToPtr:
2752 if (!TD) break; // Without TD we can't analyze pointers.
2753 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2754 TD->getIntPtrType());
2756 case Instruction::PtrToInt:
2757 if (!TD) break; // Without TD we can't analyze pointers.
2758 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2759 TD->getIntPtrType());
2761 case Instruction::GetElementPtr:
2762 if (!TD) break; // Without TD we can't analyze pointers.
2763 return createNodeForGEP(U);
2765 case Instruction::PHI:
2766 return createNodeForPHI(cast<PHINode>(U));
2768 case Instruction::Select:
2769 // This could be a smax or umax that was lowered earlier.
2770 // Try to recover it.
2771 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2772 Value *LHS = ICI->getOperand(0);
2773 Value *RHS = ICI->getOperand(1);
2774 switch (ICI->getPredicate()) {
2775 case ICmpInst::ICMP_SLT:
2776 case ICmpInst::ICMP_SLE:
2777 std::swap(LHS, RHS);
2778 // fall through
2779 case ICmpInst::ICMP_SGT:
2780 case ICmpInst::ICMP_SGE:
2781 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2782 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2783 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2784 return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2785 break;
2786 case ICmpInst::ICMP_ULT:
2787 case ICmpInst::ICMP_ULE:
2788 std::swap(LHS, RHS);
2789 // fall through
2790 case ICmpInst::ICMP_UGT:
2791 case ICmpInst::ICMP_UGE:
2792 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2793 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2794 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2795 return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2796 break;
2797 case ICmpInst::ICMP_NE:
2798 // n != 0 ? n : 1 -> umax(n, 1)
2799 if (LHS == U->getOperand(1) &&
2800 isa<ConstantInt>(U->getOperand(2)) &&
2801 cast<ConstantInt>(U->getOperand(2))->isOne() &&
2802 isa<ConstantInt>(RHS) &&
2803 cast<ConstantInt>(RHS)->isZero())
2804 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2805 break;
2806 case ICmpInst::ICMP_EQ:
2807 // n == 0 ? 1 : n -> umax(n, 1)
2808 if (LHS == U->getOperand(2) &&
2809 isa<ConstantInt>(U->getOperand(1)) &&
2810 cast<ConstantInt>(U->getOperand(1))->isOne() &&
2811 isa<ConstantInt>(RHS) &&
2812 cast<ConstantInt>(RHS)->isZero())
2813 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2814 break;
2815 default:
2816 break;
2817 }
2818 }
2819 break;
2820 default: // We cannot analyze this expression.
2821 break;
2822 }
2824 return getUnknown(V);
2825 }
2829 //===----------------------------------------------------------------------===//
2830 // Iteration Count Computation Code
2833 /// getBackedgeTakenCount - If the specified loop has a predictable
2834 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2835 /// object. The backedge-taken count is the number of times the loop header
2836 /// will be branched to from within the loop. This is one less than the
2837 /// trip count of the loop, since it doesn't count the first iteration,
2838 /// when the header is branched to from outside the loop.
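/// For example, given "for (i = 0; i != n; ++i)" the backedge-taken
/// count is n: the latch branches back to the header n times, while the
/// header itself executes n+1 times.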
2840 /// Note that it is not valid to call this method on a loop without a
2841 /// loop-invariant backedge-taken count (see
2842 /// hasLoopInvariantBackedgeTakenCount).
2844 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2845 return getBackedgeTakenInfo(L).Exact;
2846 }
2848 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2849 /// return the least SCEV value that is known never to be less than the
2850 /// actual backedge taken count.
2851 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2852 return getBackedgeTakenInfo(L).Max;
2853 }
2855 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
2856 /// onto the given Worklist.
2857 static void
2858 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
2859 BasicBlock *Header = L->getHeader();
2861 // Push all Loop-header PHIs onto the Worklist stack.
2862 for (BasicBlock::iterator I = Header->begin();
2863 PHINode *PN = dyn_cast<PHINode>(I); ++I)
2864 Worklist.push_back(PN);
2865 }
2867 /// PushDefUseChildren - Push users of the given Instruction
2868 /// onto the given Worklist.
2869 static void
2870 PushDefUseChildren(Instruction *I,
2871 SmallVectorImpl<Instruction *> &Worklist) {
2872 // Push the def-use children onto the Worklist stack.
2873 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2874 UI != UE; ++UI)
2875 Worklist.push_back(cast<Instruction>(UI));
2876 }
2878 const ScalarEvolution::BackedgeTakenInfo &
2879 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2880 // Initially insert a CouldNotCompute for this loop. If the insertion
2881 // succeeds, proceed to actually compute a backedge-taken count and
2882 // update the value. The temporary CouldNotCompute value tells SCEV
2883 // code elsewhere that it shouldn't attempt to request a new
2884 // backedge-taken count, which could result in infinite recursion.
2885 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2886 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2887 if (Pair.second) {
2888 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2889 if (ItCount.Exact != getCouldNotCompute()) {
2890 assert(ItCount.Exact->isLoopInvariant(L) &&
2891 ItCount.Max->isLoopInvariant(L) &&
2892 "Computed trip count isn't loop invariant for loop!");
2893 ++NumTripCountsComputed;
2895 // Update the value in the map.
2896 Pair.first->second = ItCount;
2897 } else {
2898 if (ItCount.Max != getCouldNotCompute())
2899 // Update the value in the map.
2900 Pair.first->second = ItCount;
2901 if (isa<PHINode>(L->getHeader()->begin()))
2902 // Only count loops that have phi nodes as not being computable.
2903 ++NumTripCountsNotComputed;
2904 }
2906 // Now that we know more about the trip count for this loop, forget any
2907 // existing SCEV values for PHI nodes in this loop since they are only
2908 // conservative estimates made without the benefit of trip count
2909 // information. This is similar to the code in
2910 // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
2911 // nodes specially.
2912 if (ItCount.hasAnyInfo()) {
2913 SmallVector<Instruction *, 16> Worklist;
2914 PushLoopPHIs(L, Worklist);
2916 SmallPtrSet<Instruction *, 8> Visited;
2917 while (!Worklist.empty()) {
2918 Instruction *I = Worklist.pop_back_val();
2919 if (!Visited.insert(I)) continue;
2921 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2922 Scalars.find(static_cast<Value *>(I));
2923 if (It != Scalars.end()) {
2924 // SCEVUnknown for a PHI either means that it has an unrecognized
2925 // structure, or it's a PHI that's in the process of being computed
2926 // by createNodeForPHI. In the former case, additional loop trip count
2927 // information isn't going to change anything. In the latter case,
2928 // createNodeForPHI will perform the necessary updates on its own when
2929 // it gets to that point.
2930 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
2931 Scalars.erase(It);
2932 ValuesAtScopes.erase(I);
2933 if (PHINode *PN = dyn_cast<PHINode>(I))
2934 ConstantEvolutionLoopExitValue.erase(PN);
2935 }
2937 PushDefUseChildren(I, Worklist);
2938 }
2939 }
2940 }
2941 return Pair.first->second;
2942 }
2944 /// forgetLoopBackedgeTakenCount - This method should be called by the
2945 /// client when it has changed a loop in a way that may affect
2946 /// ScalarEvolution's ability to compute a trip count, or if the loop
2947 /// is deleted.
2948 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2949 BackedgeTakenCounts.erase(L);
2951 SmallVector<Instruction *, 16> Worklist;
2952 PushLoopPHIs(L, Worklist);
2954 SmallPtrSet<Instruction *, 8> Visited;
2955 while (!Worklist.empty()) {
2956 Instruction *I = Worklist.pop_back_val();
2957 if (!Visited.insert(I)) continue;
2959 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2960 Scalars.find(static_cast<Value *>(I));
2961 if (It != Scalars.end()) {
2962 Scalars.erase(It);
2963 ValuesAtScopes.erase(I);
2964 if (PHINode *PN = dyn_cast<PHINode>(I))
2965 ConstantEvolutionLoopExitValue.erase(PN);
2966 }
2968 PushDefUseChildren(I, Worklist);
2969 }
2970 }
2972 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
2973 /// of the specified loop will execute.
2974 ScalarEvolution::BackedgeTakenInfo
2975 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2976 SmallVector<BasicBlock*, 8> ExitingBlocks;
2977 L->getExitingBlocks(ExitingBlocks);
2979 // Examine all exits and pick the most conservative values.
2980 const SCEV *BECount = getCouldNotCompute();
2981 const SCEV *MaxBECount = getCouldNotCompute();
2982 bool CouldNotComputeBECount = false;
2983 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2984 BackedgeTakenInfo NewBTI =
2985 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2987 if (NewBTI.Exact == getCouldNotCompute()) {
2988 // We couldn't compute an exact value for this exit, so
2989 // we won't be able to compute an exact value for the loop.
2990 CouldNotComputeBECount = true;
2991 BECount = getCouldNotCompute();
2992 } else if (!CouldNotComputeBECount) {
2993 if (BECount == getCouldNotCompute())
2994 BECount = NewBTI.Exact;
2995 else
2996 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
2997 }
2998 if (MaxBECount == getCouldNotCompute())
2999 MaxBECount = NewBTI.Max;
3000 else if (NewBTI.Max != getCouldNotCompute())
3001 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3002 }
3004 return BackedgeTakenInfo(BECount, MaxBECount);
3005 }
3007 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3008 /// of the specified loop will execute if it exits via the specified block.
3009 ScalarEvolution::BackedgeTakenInfo
3010 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3011 BasicBlock *ExitingBlock) {
3013 // Okay, we've chosen an exiting block. See what condition causes us to
3014 // exit at this block.
3016 // FIXME: we should be able to handle switch instructions (with a single exit)
3017 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3018 if (ExitBr == 0) return getCouldNotCompute();
3019 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3021 // At this point, we know we have a conditional branch that determines whether
3022 // the loop is exited. However, we don't know if the branch is executed each
3023 // time through the loop. If not, then the execution count of the branch will
3024 // not be equal to the trip count of the loop.
3026 // Currently we check for this by checking to see if the Exit branch goes to
3027 // the loop header. If so, we know it will always execute the same number of
3028 // times as the loop. We also handle the case where the exit block *is* the
3029 // loop header. This is common for un-rotated loops.
3031 // If both of those tests fail, walk up the unique predecessor chain to the
3032 // header, stopping if there is an edge that doesn't exit the loop. If the
3033 // header is reached, the execution count of the branch will be equal to the
3034 // trip count of the loop.
3036 // More extensive analysis could be done to handle more cases here.
3038 if (ExitBr->getSuccessor(0) != L->getHeader() &&
3039 ExitBr->getSuccessor(1) != L->getHeader() &&
3040 ExitBr->getParent() != L->getHeader()) {
3041 // The simple checks failed, try climbing the unique predecessor chain
3042 // up to the header.
3043 bool Ok = false;
3044 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3045 BasicBlock *Pred = BB->getUniquePredecessor();
3046 if (!Pred)
3047 return getCouldNotCompute();
3048 TerminatorInst *PredTerm = Pred->getTerminator();
3049 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3050 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3051 if (PredSucc == BB)
3052 continue;
3053 // If the predecessor has a successor that isn't BB and isn't
3054 // outside the loop, assume the worst.
3055 if (L->contains(PredSucc))
3056 return getCouldNotCompute();
3057 }
3058 if (Pred == L->getHeader()) {
3059 Ok = true;
3060 break;
3061 }
3062 BB = Pred;
3063 }
3064 if (!Ok)
3065 return getCouldNotCompute();
3067 }
3068 // Proceed to the next level to examine the exit condition expression.
3069 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3070 ExitBr->getSuccessor(0),
3071 ExitBr->getSuccessor(1));
3074 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3075 /// backedge of the specified loop will execute if its exit condition
3076 /// were a conditional branch of ExitCond, TBB, and FBB.
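/// For example, when the latch condition "a && b" must hold for the loop
/// to keep running, the backedge-taken count is the umin of the counts
/// computed for a and b individually, since the loop exits as soon as
/// either subcondition would exit it.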
3077 ScalarEvolution::BackedgeTakenInfo
3078 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3079 Value *ExitCond,
3080 BasicBlock *TBB,
3081 BasicBlock *FBB) {
3082 // Check if the controlling expression for this loop is an And or Or.
3083 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3084 if (BO->getOpcode() == Instruction::And) {
3085 // Recurse on the operands of the and.
3086 BackedgeTakenInfo BTI0 =
3087 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3088 BackedgeTakenInfo BTI1 =
3089 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3090 const SCEV *BECount = getCouldNotCompute();
3091 const SCEV *MaxBECount = getCouldNotCompute();
3092 if (L->contains(TBB)) {
3093 // Both conditions must be true for the loop to continue executing.
3094 // Choose the less conservative count.
3095 if (BTI0.Exact == getCouldNotCompute() ||
3096 BTI1.Exact == getCouldNotCompute())
3097 BECount = getCouldNotCompute();
3098 else
3099 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3100 if (BTI0.Max == getCouldNotCompute())
3101 MaxBECount = BTI1.Max;
3102 else if (BTI1.Max == getCouldNotCompute())
3103 MaxBECount = BTI0.Max;
3105 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3106 } else {
3107 // Both conditions must be true for the loop to exit.
3108 assert(L->contains(FBB) && "Loop block has no successor in loop!");
3109 if (BTI0.Exact != getCouldNotCompute() &&
3110 BTI1.Exact != getCouldNotCompute())
3111 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3112 if (BTI0.Max != getCouldNotCompute() &&
3113 BTI1.Max != getCouldNotCompute())
3114 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3115 }
3117 return BackedgeTakenInfo(BECount, MaxBECount);
3118 }
3119 if (BO->getOpcode() == Instruction::Or) {
3120 // Recurse on the operands of the or.
3121 BackedgeTakenInfo BTI0 =
3122 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3123 BackedgeTakenInfo BTI1 =
3124 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3125 const SCEV *BECount = getCouldNotCompute();
3126 const SCEV *MaxBECount = getCouldNotCompute();
3127 if (L->contains(FBB)) {
3128 // Both conditions must be false for the loop to continue executing.
3129 // Choose the less conservative count.
3130 if (BTI0.Exact == getCouldNotCompute() ||
3131 BTI1.Exact == getCouldNotCompute())
3132 BECount = getCouldNotCompute();
3133 else
3134 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3135 if (BTI0.Max == getCouldNotCompute())
3136 MaxBECount = BTI1.Max;
3137 else if (BTI1.Max == getCouldNotCompute())
3138 MaxBECount = BTI0.Max;
3140 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3141 } else {
3142 // Both conditions must be false for the loop to exit.
3143 assert(L->contains(TBB) && "Loop block has no successor in loop!");
3144 if (BTI0.Exact != getCouldNotCompute() &&
3145 BTI1.Exact != getCouldNotCompute())
3146 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3147 if (BTI0.Max != getCouldNotCompute() &&
3148 BTI1.Max != getCouldNotCompute())
3149 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3150 }
3152 return BackedgeTakenInfo(BECount, MaxBECount);
3153 }
3154 }
3156 // With an icmp, it may be feasible to compute an exact backedge-taken count.
3157 // Proceed to the next level to examine the icmp.
3158 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3159 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3161 // If it's not an integer or pointer comparison then compute it the hard way.
3162 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3163 }
3165 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3166 /// backedge of the specified loop will execute if its exit condition
3167 /// were a conditional branch on the ICmpInst ExitCond with successors TBB and FBB.
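///
/// For example (illustrative): in "for (i = 0; i != n; ++i)" the ICMP_NE
/// case below rewrites the test as {0,+,1} - n != 0 and asks HowFarToZero,
/// which yields a backedge-taken count of n.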
3168 ScalarEvolution::BackedgeTakenInfo
3169 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3170 ICmpInst *ExitCond,
3171 BasicBlock *TBB,
3172 BasicBlock *FBB) {
3174 // If the condition was exit on true, convert the condition to exit on false.
3175 ICmpInst::Predicate Cond;
3176 if (!L->contains(FBB))
3177 Cond = ExitCond->getPredicate();
3178 else
3179 Cond = ExitCond->getInversePredicate();
3181 // Handle common loops like: for (X = "string"; *X; ++X)
3182 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3183 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3184 const SCEV *ItCnt =
3185 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3186 if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3187 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3188 return BackedgeTakenInfo(ItCnt,
3189 isa<SCEVConstant>(ItCnt) ? ItCnt :
3190 getConstant(APInt::getMaxValue(BitWidth)-1));
3191 }
3192 }
3194 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3195 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3197 // Try to evaluate any dependencies out of the loop.
3198 LHS = getSCEVAtScope(LHS, L);
3199 RHS = getSCEVAtScope(RHS, L);
3201 // At this point, we would like to compute how many iterations of the
3202 // loop the predicate will return true for these inputs.
3203 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3204 // If there is a loop-invariant, force it into the RHS.
3205 std::swap(LHS, RHS);
3206 Cond = ICmpInst::getSwappedPredicate(Cond);
3207 }
3209 // If we have a comparison of a chrec against a constant, try to use value
3210 // ranges to answer this query.
3211 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3212 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3213 if (AddRec->getLoop() == L) {
3214 // Form the constant range.
3215 ConstantRange CompRange(
3216 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3218 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3219 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3220 }
3222 switch (Cond) {
3223 case ICmpInst::ICMP_NE: { // while (X != Y)
3224 // Convert to: while (X-Y != 0)
3225 const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3226 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3227 break;
3228 }
3229 case ICmpInst::ICMP_EQ: {
3230 // Convert to: while (X-Y == 0) // while (X == Y)
3231 const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3232 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3233 break;
3234 }
3235 case ICmpInst::ICMP_SLT: {
3236 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3237 if (BTI.hasAnyInfo()) return BTI;
3238 break;
3239 }
3240 case ICmpInst::ICMP_SGT: {
3241 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3242 getNotSCEV(RHS), L, true);
3243 if (BTI.hasAnyInfo()) return BTI;
3244 break;
3245 }
3246 case ICmpInst::ICMP_ULT: {
3247 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3248 if (BTI.hasAnyInfo()) return BTI;
3249 break;
3250 }
3251 case ICmpInst::ICMP_UGT: {
3252 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3253 getNotSCEV(RHS), L, false);
3254 if (BTI.hasAnyInfo()) return BTI;
3255 break;
3256 }
3257 default:
3258 #if 0
3259 errs() << "ComputeBackedgeTakenCount ";
3260 if (ExitCond->getOperand(0)->getType()->isUnsigned())
3261 errs() << "[unsigned] ";
3262 errs() << *LHS << " "
3263 << Instruction::getOpcodeName(Instruction::ICmp)
3264 << " " << *RHS << "\n";
3265 #endif
3266 break;
3267 }
3268 return
3269 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3270 }
3272 static ConstantInt *
3273 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3274 ScalarEvolution &SE) {
3275 const SCEV *InVal = SE.getConstant(C);
3276 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3277 assert(isa<SCEVConstant>(Val) &&
3278 "Evaluation of SCEV at constant didn't fold correctly?");
3279 return cast<SCEVConstant>(Val)->getValue();
3280 }
3282 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3283 /// and a GEP expression (missing the pointer index) indexing into it, return
3284 /// the addressed element of the initializer or null if the index expression is
3285 /// invalid.
3286 static Constant *
3287 GetAddressedElementFromGlobal(GlobalVariable *GV,
3288 const std::vector<ConstantInt*> &Indices) {
3289 Constant *Init = GV->getInitializer();
3290 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3291 uint64_t Idx = Indices[i]->getZExtValue();
3292 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3293 assert(Idx < CS->getNumOperands() && "Bad struct index!");
3294 Init = cast<Constant>(CS->getOperand(Idx));
3295 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3296 if (Idx >= CA->getNumOperands()) return 0; // Bogus program
3297 Init = cast<Constant>(CA->getOperand(Idx));
3298 } else if (isa<ConstantAggregateZero>(Init)) {
3299 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3300 assert(Idx < STy->getNumElements() && "Bad struct index!");
3301 Init = Constant::getNullValue(STy->getElementType(Idx));
3302 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3303 if (Idx >= ATy->getNumElements()) return 0; // Bogus program
3304 Init = Constant::getNullValue(ATy->getElementType());
3305 } else {
3306 LLVM_UNREACHABLE("Unknown constant aggregate type!");
3307 }
3309 } else {
3310 return 0; // Unknown initializer type
3311 }
3312 }
3313 return Init;
3314 }
3316 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3317 /// 'icmp op load X, cst', try to see if we can compute the backedge
3318 /// execution count.
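///
/// For example (an illustrative sketch): with a constant global
///
///   const int table[] = {3, 1, 4, 1, 0};
///
/// and an exit test "table[i] != 0", the code below evaluates the index
/// chrec iteration by iteration and finds the terminating iteration 4.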
3319 const SCEV *
3320 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3321 LoadInst *LI,
3322 Constant *RHS,
3323 const Loop *L,
3324 ICmpInst::Predicate predicate) {
3325 if (LI->isVolatile()) return getCouldNotCompute();
3327 // Check to see if the loaded pointer is a getelementptr of a global.
3328 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3329 if (!GEP) return getCouldNotCompute();
3331 // Make sure that it is really a constant global we are gepping, with an
3332 // initializer, and make sure the first IDX is really 0.
3333 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3334 if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3335 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3336 !cast<Constant>(GEP->getOperand(1))->isNullValue())
3337 return getCouldNotCompute();
3339 // Okay, we allow one non-constant index into the GEP instruction.
3340 Value *VarIdx = 0;
3341 std::vector<ConstantInt*> Indexes;
3342 unsigned VarIdxNum = 0;
3343 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3344 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3345 Indexes.push_back(CI);
3346 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3347 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
3348 VarIdx = GEP->getOperand(i);
3349 VarIdxNum = i-2;
3350 Indexes.push_back(0);
3351 }
3353 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3354 // Check to see if X is a loop variant variable value now.
3355 const SCEV *Idx = getSCEV(VarIdx);
3356 Idx = getSCEVAtScope(Idx, L);
3358 // We can only recognize very limited forms of loop index expressions, in
3359 // particular, only affine AddRec's like {C1,+,C2}.
3360 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3361 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3362 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3363 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3364 return getCouldNotCompute();
3366 unsigned MaxSteps = MaxBruteForceIterations;
3367 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3368 ConstantInt *ItCst =
3369 ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3370 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3372 // Form the GEP offset.
3373 Indexes[VarIdxNum] = Val;
3375 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3376 if (Result == 0) break; // Cannot compute!
3378 // Evaluate the condition for this iteration.
3379 Result = ConstantExpr::getICmp(predicate, Result, RHS);
3380 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
3381 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3382 #if 0
3383 errs() << "\n***\n*** Computed loop count " << *ItCst
3384 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3385 << "***\n";
3386 #endif
3387 ++NumArrayLenItCounts;
3388 return getConstant(ItCst); // Found terminating iteration!
3389 }
3390 }
3391 return getCouldNotCompute();
3392 }
3395 /// CanConstantFold - Return true if we can constant fold an instruction of the
3396 /// specified type, assuming that all operands were constants.
3397 static bool CanConstantFold(const Instruction *I) {
3398 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3399 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3400 return true;
3402 if (const CallInst *CI = dyn_cast<CallInst>(I))
3403 if (const Function *F = CI->getCalledFunction())
3404 return canConstantFoldCallTo(F);
3405 return false;
3406 }
3408 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3409 /// in the loop that V is derived from. We allow arbitrary operations along the
3410 /// way, but the operands of an operation must either be constants or a value
3411 /// derived from a constant PHI. If this expression does not fit with these
3412 /// constraints, return null.
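///
/// For example (illustrative): given "i = phi(0, i*3+1)" in a loop header,
/// the multiply and add below the PHI use only constants and the PHI
/// itself, so this returns that PHI; a use of a second, different PHI
/// would make it return null.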
3413 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3414 // If this is not an instruction, or if this is an instruction outside of the
3415 // loop, it can't be derived from a loop PHI.
3416 Instruction *I = dyn_cast<Instruction>(V);
3417 if (I == 0 || !L->contains(I->getParent())) return 0;
3419 if (PHINode *PN = dyn_cast<PHINode>(I)) {
3420 if (L->getHeader() == I->getParent())
3421 return PN;
3423 // We don't currently keep track of the control flow needed to evaluate
3424 // PHIs, so we cannot handle PHIs inside of loops.
3425 return 0;
3426 }
3428 // If we won't be able to constant fold this expression even if the operands
3429 // are constants, return early.
3430 if (!CanConstantFold(I)) return 0;
3432 // Otherwise, we can evaluate this instruction if all of its operands are
3433 // constant or derived from a PHI node themselves.
3434 PHINode *PHI = 0;
3435 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3436 if (!(isa<Constant>(I->getOperand(Op)) ||
3437 isa<GlobalValue>(I->getOperand(Op)))) {
3438 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3439 if (P == 0) return 0; // Not evolving from PHI
3440 if (PHI == 0)
3441 PHI = P;
3442 else if (PHI != P)
3443 return 0; // Evolving from multiple different PHIs.
3444 }
3446 // This is an expression evolving from a constant PHI!
3447 return PHI;
3448 }
3450 /// EvaluateExpression - Given an expression that passes the
3451 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3452 /// in the loop has the value PHIVal. If we can't fold this expression for some
3453 /// reason, return null.
3454 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3455 if (isa<PHINode>(V)) return PHIVal;
3456 if (Constant *C = dyn_cast<Constant>(V)) return C;
3457 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3458 Instruction *I = cast<Instruction>(V);
3459 LLVMContext *Context = I->getParent()->getContext();
3461 std::vector<Constant*> Operands;
3462 Operands.resize(I->getNumOperands());
3464 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3465 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3466 if (Operands[i] == 0) return 0;
3467 }
3469 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3470 return ConstantFoldCompareInstOperands(CI->getPredicate(),
3471 &Operands[0], Operands.size(),
3472 Context);
3474 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3475 &Operands[0], Operands.size(),
3476 Context);
3477 }
3479 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3480 /// in the header of its containing loop, that the loop executes a
3481 /// constant number of times, and that the PHI node is just a recurrence
3482 /// involving constants, fold it.
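///
/// For example (an illustrative case): if the PHI starts at 1, the
/// backedge computes i*2, and the backedge-taken count is 4, the symbolic
/// execution below returns the constant 16.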
3483 Constant *
3484 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3485 const APInt &BEs,
3486 const Loop *L) {
3487 std::map<PHINode*, Constant*>::iterator I =
3488 ConstantEvolutionLoopExitValue.find(PN);
3489 if (I != ConstantEvolutionLoopExitValue.end())
3490 return I->second;
3492 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3493 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
3495 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3497 // Since the loop is canonicalized, the PHI node must have two entries. One
3498 // entry must be a constant (coming in from outside of the loop), and the
3499 // second must be derived from the same PHI.
3500 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3501 Constant *StartCST =
3502 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3503 if (StartCST == 0)
3504 return RetVal = 0; // Must be a constant.
3506 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3507 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3508 if (PN2 != PN)
3509 return RetVal = 0; // Not derived from same PHI.
3511 // Execute the loop symbolically to determine the exit value.
3512 if (BEs.getActiveBits() >= 32)
3513 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3515 unsigned NumIterations = BEs.getZExtValue(); // must be in range
3516 unsigned IterationNum = 0;
3517 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3518 if (IterationNum == NumIterations)
3519 return RetVal = PHIVal; // Got exit value!
3521 // Compute the value of the PHI node for the next iteration.
3522 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3523 if (NextPHI == PHIVal)
3524 return RetVal = NextPHI; // Stopped evolving!
3525 if (NextPHI == 0)
3526 return 0; // Couldn't evaluate!
3527 PHIVal = NextPHI;
3528 }
3529 }
3531 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3532 /// constant number of times (the condition evolves only from constants),
3533 /// try to evaluate a few iterations of the loop until the exit
3534 /// condition gets a value of ExitWhen (true or false). If we cannot
3535 /// evaluate the trip count of the loop, return getCouldNotCompute().
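///
/// For example (illustrative): a loop whose PHI starts at 0, steps by the
/// constant 3, and exits on "(i & 8) != 0" evolves only from constants, so
/// evaluating a few iterations (0, 3, 6, 9, ...) finds the first one where
/// the condition equals ExitWhen.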
3536 const SCEV *
3537 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3538 Value *Cond,
3539 bool ExitWhen) {
3540 PHINode *PN = getConstantEvolvingPHI(Cond, L);
3541 if (PN == 0) return getCouldNotCompute();
3543 // Since the loop is canonicalized, the PHI node must have two entries. One
3544 // entry must be a constant (coming in from outside of the loop), and the
3545 // second must be derived from the same PHI.
3546 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3547 Constant *StartCST =
3548 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3549 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
3551 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3552 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3553 if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
3555 // Okay, we found a PHI node that defines the trip count of this loop. Execute
3556 // the loop symbolically to determine when the condition gets a value of
3557 // ExitWhen.
3558 unsigned IterationNum = 0;
3559 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
3560 for (Constant *PHIVal = StartCST;
3561 IterationNum != MaxIterations; ++IterationNum) {
3562 ConstantInt *CondVal =
3563 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3565 // Couldn't symbolically evaluate.
3566 if (!CondVal) return getCouldNotCompute();
3568 if (CondVal->getValue() == uint64_t(ExitWhen)) {
3569 ++NumBruteForceTripCountsComputed;
3570 return getConstant(Type::Int32Ty, IterationNum);
3573 // Compute the value of the PHI node for the next iteration.
3574 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3575 if (NextPHI == 0 || NextPHI == PHIVal)
3576 return getCouldNotCompute();// Couldn't evaluate or not making progress...
3577 PHIVal = NextPHI;
3578 }
3580 // Too many iterations were needed to evaluate.
3581 return getCouldNotCompute();
3582 }
3584 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3585 /// at the specified scope in the program. The L value specifies a loop
3586 /// nest to evaluate the expression at, where null means the top level, and
3587 /// a specified loop means evaluating the expression just inside that loop.
3589 /// This method can be used to compute the exit value for a variable defined
3590 /// in a loop by querying what the value will hold in the parent loop.
3592 /// In the case that a relevant loop exit value cannot be computed, the
3593 /// original value V is returned.
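///
/// For example (illustrative): for an induction variable {0,+,1} in a loop
/// whose backedge-taken count is n-1, requesting its value at the parent
/// loop's scope evaluates the recurrence at the final iteration, n-1.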
3594 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3595 // FIXME: this should be turned into a virtual method on SCEV!
3597 if (isa<SCEVConstant>(V)) return V;
3599 // If this instruction is evolved from a constant-evolving PHI, compute the
3600 // exit value from the loop without using SCEVs.
3601 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3602 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3603 const Loop *LI = (*this->LI)[I->getParent()];
3604 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
3605 if (PHINode *PN = dyn_cast<PHINode>(I))
3606 if (PN->getParent() == LI->getHeader()) {
3607 // Okay, there is no closed form solution for the PHI node. Check
3608 // to see if the loop that contains it has a known backedge-taken
3609 // count. If so, we may be able to force computation of the exit
3610 // value.
3611 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3612 if (const SCEVConstant *BTCC =
3613 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3614 // Okay, we know how many times the containing loop executes. If
3615 // this is a constant evolving PHI node, get the final value at
3616 // the specified iteration number.
3617 Constant *RV = getConstantEvolutionLoopExitValue(PN,
3618 BTCC->getValue()->getValue(),
3620 if (RV) return getSCEV(RV);
3621 }
3622 }
3624 // Okay, this is an expression that we cannot symbolically evaluate
3625 // into a SCEV. Check to see if it's possible to symbolically evaluate
3626 // the arguments into constants, and if so, try to constant propagate the
3627 // result. This is particularly useful for computing loop exit values.
3628 if (CanConstantFold(I)) {
3629 // Check to see if we've folded this instruction at this loop before.
3630 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3631 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3632 Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3633 if (!Pair.second)
3634 return Pair.first->second ? &*getSCEV(Pair.first->second) : V;
3636 std::vector<Constant*> Operands;
3637 Operands.reserve(I->getNumOperands());
3638 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3639 Value *Op = I->getOperand(i);
3640 if (Constant *C = dyn_cast<Constant>(Op)) {
3641 Operands.push_back(C);
3642 } else {
3643 // If any of the operands is non-constant and if they are
3644 // non-integer and non-pointer, don't even try to analyze them
3645 // with scev techniques.
3646 if (!isSCEVable(Op->getType()))
3647 return V;
3649 const SCEV *OpV = getSCEVAtScope(getSCEV(Op), L);
3650 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3651 Constant *C = SC->getValue();
3652 if (C->getType() != Op->getType())
3653 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3654 Op->getType(),
3655 false),
3656 C, Op->getType());
3657 Operands.push_back(C);
3658 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3659 if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3660 if (C->getType() != Op->getType())
3661 C =
3662 ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3663 Op->getType(),
3664 false),
3665 C, Op->getType());
3666 Operands.push_back(C);
3667 } else
3668 return V;
3669 } else
3670 return V;
3671 }
3672 }
3674 Constant *C;
3676 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3677 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3678 &Operands[0], Operands.size(),
3679 Context);
3680 else
3681 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3682 &Operands[0], Operands.size(), Context);
3683 Pair.first->second = C;
3684 return getSCEV(C);
3685 }
3686 }
3688 // This is some other type of SCEVUnknown, just return it.
3689 return V;
3690 }
3692 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3693 // Avoid performing the look-up in the common case where the specified
3694 // expression has no loop-variant portions.
3695 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3696 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3697 if (OpAtScope != Comm->getOperand(i)) {
3698 // Okay, at least one of these operands is loop variant but might be
3699 // foldable. Build a new instance of the folded commutative expression.
3700 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3701 Comm->op_begin()+i);
3702 NewOps.push_back(OpAtScope);
3704 for (++i; i != e; ++i) {
3705 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3706 NewOps.push_back(OpAtScope);
3707 }
3708 if (isa<SCEVAddExpr>(Comm))
3709 return getAddExpr(NewOps);
3710 if (isa<SCEVMulExpr>(Comm))
3711 return getMulExpr(NewOps);
3712 if (isa<SCEVSMaxExpr>(Comm))
3713 return getSMaxExpr(NewOps);
3714 if (isa<SCEVUMaxExpr>(Comm))
3715 return getUMaxExpr(NewOps);
3716 LLVM_UNREACHABLE("Unknown commutative SCEV type!");
3717 }
3718 }
3719 // If we got here, all operands are loop invariant.
3720 return Comm;
3721 }
3723 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3724 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3725 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3726 if (LHS == Div->getLHS() && RHS == Div->getRHS())
3727 return Div; // must be loop invariant
3728 return getUDivExpr(LHS, RHS);
3729 }
3731 // If this is a loop recurrence for a loop that does not contain L, then we
3732 // are dealing with the final value computed by the loop.
3733 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3734 if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3735 // To evaluate this recurrence, we need to know how many times the AddRec
3736 // loop iterates. Compute this now.
3737 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3738 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3740 // Then, evaluate the AddRec.
3741 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3742 }
3743 return AddRec;
3744 }
3746 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3747 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3748 if (Op == Cast->getOperand())
3749 return Cast; // must be loop invariant
3750 return getZeroExtendExpr(Op, Cast->getType());
3751 }
3753 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3754 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3755 if (Op == Cast->getOperand())
3756 return Cast; // must be loop invariant
3757 return getSignExtendExpr(Op, Cast->getType());
3758 }
3760 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3761 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3762 if (Op == Cast->getOperand())
3763 return Cast; // must be loop invariant
3764 return getTruncateExpr(Op, Cast->getType());
3765 }
3767 LLVM_UNREACHABLE("Unknown SCEV type!");
3768 return 0;
3769 }
3771 /// getSCEVAtScope - This is a convenience function which does
3772 /// getSCEVAtScope(getSCEV(V), L).
3773 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3774 return getSCEVAtScope(getSCEV(V), L);
3775 }
3777 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3778 /// following equation:
3780 /// A * X = B (mod N)
3782 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3783 /// A and B isn't important.
3785 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
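///
/// For example (a worked instance): for 4*X = 8 (mod 16), D = gcd(4,16) = 4
/// divides 8, so a solution exists; dividing through by 4 leaves
/// 1*X = 2 (mod 4), whose minimum unsigned root is X = 2.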
3786 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3787 ScalarEvolution &SE) {
3788 uint32_t BW = A.getBitWidth();
3789 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3791 assert(A != 0 && "A must be non-zero.");
3792 // 1. D = gcd(A, N)
3794 // The gcd of A and N may have only one prime factor: 2. The number of
3795 // trailing zeros in A is the multiplicity of this prime factor.
3796 uint32_t Mult2 = A.countTrailingZeros();
3799 // 2. Check if B is divisible by D.
3801 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3802 // is not less than multiplicity of this prime factor for D.
3803 if (B.countTrailingZeros() < Mult2)
3804 return SE.getCouldNotCompute();
3806 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3807 // modulo (N / D).
3809 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
3810 // bit width during computations.
3811 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
3812 APInt Mod(BW + 1, 0);
3813 Mod.set(BW - Mult2); // Mod = N / D
3814 APInt I = AD.multiplicativeInverse(Mod);
3816 // 4. Compute the minimum unsigned root of the equation:
3817 // I * (B / D) mod (N / D)
3818 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3820 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
3821 // bits.
3822 return SE.getConstant(Result.trunc(BW));
3823 }
3825 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3826 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
3827 /// might be the same) or two SCEVCouldNotCompute objects.
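///
/// For background (illustrative): at iteration x the chrec {L,+,M,+,N} has
/// the closed form L + M*x + N*x*(x-1)/2, i.e. the polynomial
/// (N/2)*x^2 + (M - N/2)*x + L, which is exactly the A, B, C extraction
/// performed below.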
3829 static std::pair<const SCEV *,const SCEV *>
3830 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3831 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
3832 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
3833 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
3834 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
3836 // We currently can only solve this if the coefficients are constants.
3837 if (!LC || !MC || !NC) {
3838 const SCEV *CNC = SE.getCouldNotCompute();
3839 return std::make_pair(CNC, CNC);
3840 }
3842 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
3843 const APInt &L = LC->getValue()->getValue();
3844 const APInt &M = MC->getValue()->getValue();
3845 const APInt &N = NC->getValue()->getValue();
3846 APInt Two(BitWidth, 2);
3847 APInt Four(BitWidth, 4);
3849 {
3850 using namespace APIntOps;
3851 const APInt &C = L;
3852 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
3853 // The B coefficient is M-N/2
3854 APInt B(M);
3855 B -= N.sdiv(Two);
3857 // The A coefficient is N/2
3858 APInt A(N.sdiv(Two));
3860 // Compute the B^2-4ac term.
3861 APInt SqrtTerm(B);
3862 SqrtTerm *= B;
3863 SqrtTerm -= Four * (A * C);
3865 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
3866 // integer value or else APInt::sqrt() will assert.
3867 APInt SqrtVal(SqrtTerm.sqrt());
3869 // Compute the two solutions for the quadratic formula.
3870 // The divisions must be performed as signed divisions.
3871 APInt NegB(-B);
3872 APInt TwoA( A << 1 );
3873 if (TwoA.isMinValue()) {
3874 const SCEV *CNC = SE.getCouldNotCompute();
3875 return std::make_pair(CNC, CNC);
3876 }
3878 LLVMContext *Context = SE.getContext();
3880 ConstantInt *Solution1 =
3881 Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
3882 ConstantInt *Solution2 =
3883 Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
3885 return std::make_pair(SE.getConstant(Solution1),
3886 SE.getConstant(Solution2));
3887 } // end APIntOps namespace
3888 }
3890 /// HowFarToZero - Return the number of times a backedge comparing the specified
3891 /// value to zero will execute. If not computable, return CouldNotCompute.
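///
/// For example (illustrative): the affine chrec {10,+,-1} first becomes
/// zero after 10 backedges; the step is -1, so the code below returns the
/// start value directly, since -1*N = -Start implies N = Start (mod 2^BW).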
3892 const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
3893 // If the value is a constant
3894 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3895 // If the value is already zero, the branch will execute zero times.
3896 if (C->getValue()->isZero()) return C;
3897 return getCouldNotCompute(); // Otherwise it will loop infinitely.
3898 }
3900 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
3901 if (!AddRec || AddRec->getLoop() != L)
3902 return getCouldNotCompute();
3904 if (AddRec->isAffine()) {
3905 // If this is an affine expression, the execution count of this branch is
3906 // the minimum unsigned root of the following equation:
3908 // Start + Step*N = 0 (mod 2^BW)
3910 // equivalent to:
3912 // Step*N = -Start (mod 2^BW)
3914 // where BW is the common bit width of Start and Step.
3916 // Get the initial value for the loop.
3917 const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
3918 L->getParentLoop());
3919 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
3920 L->getParentLoop());
3922 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
3923 // For now we handle only constant steps.
3925 // First, handle unitary steps.
3926 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
3927 return getNegativeSCEV(Start); // N = -Start (as unsigned)
3928 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
3929 return Start; // N = Start (as unsigned)
3931 // Then, try to solve the above equation provided that Start is constant.
3932 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
3933 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
3934 -StartC->getValue()->getValue(),
3935 *this);
3936 }
3937 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
3938 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
3939 // the quadratic equation to solve it.
3940 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
3941 *this);
3942 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
3943 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
3944 if (R1) {
3945 #if 0
3946 errs() << "HFTZ: " << *V << " - sol#1: " << *R1
3947 << " sol#2: " << *R2 << "\n";
3948 #endif
3949 // Pick the smallest positive root value.
3950 if (ConstantInt *CB =
3951 dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
3952 R1->getValue(), R2->getValue()))) {
3953 if (CB->getZExtValue() == false)
3954 std::swap(R1, R2); // R1 is the minimum root now.
3956 // We can only use this value if the chrec ends up with an exact zero
3957 // value at this index. When solving for "X*X != 5", for example, we
3958 // should not accept a root of 2.
3959 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
3960 if (Val->isZero())
3961 return R1; // We found a quadratic root!
3962 }
3963 }
3964 }
3966 return getCouldNotCompute();
3967 }
3969 /// HowFarToNonZero - Return the number of times a backedge checking the
3970 /// specified value for nonzero will execute. If not computable, return
3971 /// CouldNotCompute.
3972 const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
3973 // Loops that look like: while (X == 0) are very strange indeed. We don't
3974 // handle them yet except for the trivial case. This could be expanded in the
3975 // future as needed.
3977 // If the value is a constant, check to see if it is known to be non-zero
3978 // already. If so, the backedge will execute zero times.
3979 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3980 if (!C->getValue()->isNullValue())
3981 return getIntegerSCEV(0, C->getType());
3982 return getCouldNotCompute(); // Otherwise it will loop infinitely.
3983 }
3985 // We could implement others, but I really doubt anyone writes loops like
3986 // this, and if they did, they would already be constant folded.
3987 return getCouldNotCompute();
3988 }
3990 /// getLoopPredecessor - If the given loop's header has exactly one unique
3991 /// predecessor outside the loop, return it. Otherwise return null.
3993 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
3994 BasicBlock *Header = L->getHeader();
3995 BasicBlock *Pred = 0;
3996 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
3997 PI != E; ++PI)
3998 if (!L->contains(*PI)) {
3999 if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4000 Pred = *PI;
4001 }
4002 return Pred;
4003 }
4005 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4006 /// (which may not be an immediate predecessor) which has exactly one
4007 /// successor from which BB is reachable, or null if no such block is
4008 /// found.
4010 BasicBlock *
4011 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4012 // If the block has a unique predecessor, then there is no path from the
4013 // predecessor to the block that does not go through the direct edge
4014 // from the predecessor to the block.
4015 if (BasicBlock *Pred = BB->getSinglePredecessor())
4016 return Pred;
4018 // A loop's header is defined to be a block that dominates the loop.
4019 // If the header has a unique predecessor outside the loop, it must be
4020 // a block that has exactly one successor that can reach the loop.
4021 if (Loop *L = LI->getLoopFor(BB))
4022 return getLoopPredecessor(L);
4024 return 0;
4025 }
4027 /// HasSameValue - SCEV structural equivalence is usually sufficient for
4028 /// testing whether two expressions are equal, however for the purposes of
4029 /// looking for a condition guarding a loop, it can be useful to be a little
4030 /// more general, since a front-end may have replicated the controlling
4031 /// expression.
4033 static bool HasSameValue(const SCEV *A, const SCEV *B) {
4034 // Quick check to see if they are the same SCEV.
4035 if (A == B) return true;
4037 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4038 // two different instructions with the same value. Check for this case.
4039 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4040 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4041 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4042 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4043 if (AI->isIdenticalTo(BI))
4044 return true;
4046 // Otherwise assume they may have a different value.
4047 return false;
4048 }
4050 /// isLoopGuardedByCond - Test whether entry to the loop is protected by
4051 /// a conditional between LHS and RHS. This is used to help avoid max
4052 /// expressions in loop trip counts.
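///
/// For example (illustrative): in
///
///   if (0 < n)
///     for (i = 0; i < n; ++i) body();
///
/// the guard proves the loop is entered only when the bound holds, letting
/// HowManyLessThans return n rather than the more conservative smax(n, 0).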
4053 bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4054 ICmpInst::Predicate Pred,
4055 const SCEV *LHS, const SCEV *RHS) {
4056 // Interpret a null as meaning no loop, where there is obviously no guard
4057 // (interprocedural conditions notwithstanding).
4058 if (!L) return false;
4060 BasicBlock *Predecessor = getLoopPredecessor(L);
4061 BasicBlock *PredecessorDest = L->getHeader();
4063 // Starting at the loop predecessor, climb up the predecessor chain, as long
4064 // as there are predecessors that can be found that have unique successors
4065 // leading to the original header.
4066 for (; Predecessor;
4067 PredecessorDest = Predecessor,
4068 Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4070 BranchInst *LoopEntryPredicate =
4071 dyn_cast<BranchInst>(Predecessor->getTerminator());
4072 if (!LoopEntryPredicate ||
4073 LoopEntryPredicate->isUnconditional())
4074 continue;
4076 if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4077 LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4078 return true;
4079 }
4081 return false;
4082 }
4084 /// isNecessaryCond - Test whether the given CondValue value is a condition
4085 /// which is at least as strict as the one described by Pred, LHS, and RHS.
4086 bool ScalarEvolution::isNecessaryCond(Value *CondValue,
4087 ICmpInst::Predicate Pred,
4088 const SCEV *LHS, const SCEV *RHS,
4089 bool Inverse) {
4090 // Recursively handle And and Or conditions.
4091 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4092 if (BO->getOpcode() == Instruction::And) {
4093 if (!Inverse)
4094 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4095 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4096 } else if (BO->getOpcode() == Instruction::Or) {
4097 if (Inverse)
4098 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4099 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4100 }
4101 }
4103 ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4104 if (!ICI) return false;
4106 // Now that we found a conditional branch that dominates the loop, check to
4107 // see if it is the comparison we are looking for.
4108 Value *PreCondLHS = ICI->getOperand(0);
4109 Value *PreCondRHS = ICI->getOperand(1);
4110 ICmpInst::Predicate Cond;
4111 if (Inverse)
4112 Cond = ICI->getInversePredicate();
4113 else
4114 Cond = ICI->getPredicate();
4116 if (Cond == Pred)
4117 ; // An exact match.
4118 else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
4119 ; // The actual condition is beyond sufficient.
4120 else
4121 // Check a few special cases.
4122 switch (Cond) {
4123 case ICmpInst::ICMP_UGT:
4124 if (Pred == ICmpInst::ICMP_ULT) {
4125 std::swap(PreCondLHS, PreCondRHS);
4126 Cond = ICmpInst::ICMP_ULT;
4127 break;
4128 }
4129 return false;
4130 case ICmpInst::ICMP_SGT:
4131 if (Pred == ICmpInst::ICMP_SLT) {
4132 std::swap(PreCondLHS, PreCondRHS);
4133 Cond = ICmpInst::ICMP_SLT;
4134 break;
4135 }
4136 return false;
4137 case ICmpInst::ICMP_NE:
4138 // Expressions like (x >u 0) are often canonicalized to (x != 0),
4139 // so check for this case by checking if the NE is comparing against
4140 // a minimum or maximum constant.
4141 if (!ICmpInst::isTrueWhenEqual(Pred))
4142 if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
4143 const APInt &A = CI->getValue();
4144 switch (Pred) {
4145 case ICmpInst::ICMP_SLT:
4146 if (A.isMaxSignedValue()) break;
4147 return false;
4148 case ICmpInst::ICMP_SGT:
4149 if (A.isMinSignedValue()) break;
4150 return false;
4151 case ICmpInst::ICMP_ULT:
4152 if (A.isMaxValue()) break;
4153 return false;
4154 case ICmpInst::ICMP_UGT:
4155 if (A.isMinValue()) break;
4156 return false;
4157 default:
4158 return false;
4159 }
4160 Cond = ICmpInst::ICMP_NE;
4161 // NE is symmetric but the original comparison may not be. Swap
4162 // the operands if necessary so that they match below.
4163 if (isa<SCEVConstant>(LHS))
4164 std::swap(PreCondLHS, PreCondRHS);
4165 break;
4166 }
4167 return false;
4168 default:
4169 // We weren't able to reconcile the condition.
4170 return false;
4171 }
4173 if (!PreCondLHS->getType()->isInteger()) return false;
4175 const SCEV *PreCondLHSSCEV = getSCEV(PreCondLHS);
4176 const SCEV *PreCondRHSSCEV = getSCEV(PreCondRHS);
4177 return (HasSameValue(LHS, PreCondLHSSCEV) &&
4178 HasSameValue(RHS, PreCondRHSSCEV)) ||
4179 (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
4180 HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)));
4181 }
4183 /// getBECount - Subtract the end and start values and divide by the step,
4184 /// rounding up, to get the number of times the backedge is executed. Return
4185 /// CouldNotCompute if an intermediate computation overflows.
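///
/// For example (a worked instance): Start = 0, End = 10, Step = 3 computes
/// (10 - 0 + (3 - 1)) /u 3 = 4, i.e. (End - Start) / Step rounded up.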
4186 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4187 const SCEV *End,
4188 const SCEV *Step) {
4189 const Type *Ty = Start->getType();
4190 const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4191 const SCEV *Diff = getMinusSCEV(End, Start);
4192 const SCEV *RoundUp = getAddExpr(Step, NegOne);
4194 // Add an adjustment to the difference between End and Start so that
4195 // the division will effectively round up.
4196 const SCEV *Add = getAddExpr(Diff, RoundUp);
4198 // Check Add for unsigned overflow.
4199 // TODO: More sophisticated things could be done here.
4200 const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
4201 const SCEV *OperandExtendedAdd =
4202 getAddExpr(getZeroExtendExpr(Diff, WideTy),
4203 getZeroExtendExpr(RoundUp, WideTy));
4204 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4205 return getCouldNotCompute();
4207 return getUDivExpr(Add, Step);
4208 }
4210 /// HowManyLessThans - Return the number of times a backedge containing the
4211 /// specified less-than comparison will execute. If not computable, return
4212 /// CouldNotCompute.
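///
/// For example (illustrative): with LHS = {5,+,1} and a loop-invariant
/// RHS n under a signed test, the code below forms End = smax(n, 5) and
/// divides (End - 5) by the unit stride, yielding n - 5 when n >= 5.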
4213 ScalarEvolution::BackedgeTakenInfo
4214 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4215 const Loop *L, bool isSigned) {
4216 // Only handle: "ADDREC < LoopInvariant".
4217 if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4219 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4220 if (!AddRec || AddRec->getLoop() != L)
4221 return getCouldNotCompute();
4223 if (AddRec->isAffine()) {
4224 // FORNOW: We only support unit strides.
4225 unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4226 const SCEV *Step = AddRec->getStepRecurrence(*this);
4228 // TODO: handle non-constant strides.
4229 const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4230 if (!CStep || CStep->isZero())
4231 return getCouldNotCompute();
4232 if (CStep->isOne()) {
4233 // With unit stride, the iteration never steps past the limit value.
4234 } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4235 if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4236 // Test whether a positive iteration can step past the limit
4237 // value and past the maximum value for its type in a single step.
4238 if (isSigned) {
4239 APInt Max = APInt::getSignedMaxValue(BitWidth);
4240 if ((Max - CStep->getValue()->getValue())
4241 .slt(CLimit->getValue()->getValue()))
4242 return getCouldNotCompute();
4243 } else {
4244 APInt Max = APInt::getMaxValue(BitWidth);
4245 if ((Max - CStep->getValue()->getValue())
4246 .ult(CLimit->getValue()->getValue()))
4247 return getCouldNotCompute();
4248 }
4249 } else
4250 // TODO: handle non-constant limit values below.
4251 return getCouldNotCompute();
4252 } else
4253 // TODO: handle negative strides below.
4254 return getCouldNotCompute();
4256 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4257 // m. So, we count the number of iterations in which {n,+,s} < m is true.
4258 // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4259 // treat m-n as signed nor unsigned due to overflow possibility.
4261 // First, we get the value of the LHS in the first iteration: n
4262 const SCEV *Start = AddRec->getOperand(0);
4264 // Determine the minimum constant start value.
4265 const SCEV *MinStart = isa<SCEVConstant>(Start) ? Start :
4266 getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
4267 APInt::getMinValue(BitWidth));
4269 // If we know that the condition is true in order to enter the loop,
4270 // then we know that it will run exactly (m-n)/s times. Otherwise, we
4271 // only know that it will execute (max(m,n)-n)/s times. In both cases,
4272 // the division must round up.
4273 const SCEV *End = RHS;
4274 if (!isLoopGuardedByCond(L,
4275 isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
4276 getMinusSCEV(Start, Step), RHS))
4277 End = isSigned ? getSMaxExpr(RHS, Start)
4278 : getUMaxExpr(RHS, Start);
4280 // Determine the maximum constant end value.
4281 const SCEV *MaxEnd =
4282 isa<SCEVConstant>(End) ? End :
4283 getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
4284 .ashr(GetMinSignBits(End) - 1) :
4285 APInt::getMaxValue(BitWidth)
4286 .lshr(GetMinLeadingZeros(End)));
4288 // Finally, we subtract these two values and divide, rounding up, to get
4289 // the number of times the backedge is executed.
4290 const SCEV *BECount = getBECount(Start, End, Step);
4292 // The maximum backedge count is similar, except using the minimum start
4293 // value and the maximum end value.
4294 const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4296 return BackedgeTakenInfo(BECount, MaxBECount);
4297 }
4299 return getCouldNotCompute();
4300 }
4302 /// getNumIterationsInRange - Return the number of iterations of this loop that
4303 /// produce values in the specified constant range. Another way of looking at
4304 /// this is that it returns the first iteration number where the value is not in
4305 /// the condition, thus computing the exit count. If the iteration count can't
4306 /// be computed, an instance of SCEVCouldNotCompute is returned.
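///
/// For example (illustrative): for the chrec {0,+,2} and the range [0,10),
/// the affine case below picks End = 9 and ExitVal = (9+2)/2 = 5, and
/// iteration 5 produces 10, the first value outside the range.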
4307 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4308 ScalarEvolution &SE) const {
4309 if (Range.isFullSet()) // Infinite loop.
4310 return SE.getCouldNotCompute();
4312 // If the start is a non-zero constant, shift the range to simplify things.
4313 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4314 if (!SC->getValue()->isZero()) {
4315 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4316 Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4317 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4318 if (const SCEVAddRecExpr *ShiftedAddRec =
4319 dyn_cast<SCEVAddRecExpr>(Shifted))
4320 return ShiftedAddRec->getNumIterationsInRange(
4321 Range.subtract(SC->getValue()->getValue()), SE);
4322 // This is strange and shouldn't happen.
4323 return SE.getCouldNotCompute();
4324 }
4326 // The only time we can solve this is when we have all constant indices.
4327 // Otherwise, we cannot determine the overflow conditions.
4328 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4329 if (!isa<SCEVConstant>(getOperand(i)))
4330 return SE.getCouldNotCompute();
4333 // Okay at this point we know that all elements of the chrec are constants and
4334 // that the start element is zero.
4336 // First check to see if the range contains zero. If not, the first
4337 // iteration exits.
4338 unsigned BitWidth = SE.getTypeSizeInBits(getType());
4339 if (!Range.contains(APInt(BitWidth, 0)))
4340 return SE.getIntegerSCEV(0, getType());
4342 if (isAffine()) {
4343 // If this is an affine expression then we have this situation:
4344 // Solve {0,+,A} in Range === Ax in Range
4346 // We know that zero is in the range. If A is positive then we know that
4347 // the upper value of the range must be the first possible exit value.
4348 // If A is negative then the lower of the range is the last possible loop
4349 // value. Also note that we already checked for a full range.
4350 APInt One(BitWidth,1);
4351 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4352 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4354 // The exit value should be (End+A)/A.
4355 APInt ExitVal = (End + A).udiv(A);
4356 ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
4358 // Evaluate at the exit value. If we really did fall out of the valid
4359 // range, then we computed our trip count, otherwise wrap around or other
4360 // things must have happened.
4361 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4362 if (Range.contains(Val->getValue()))
4363 return SE.getCouldNotCompute(); // Something strange happened
4365 // Ensure that the previous value is in the range. This is a sanity check.
4366 assert(Range.contains(
4367 EvaluateConstantChrecAtConstant(this,
4368 SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
4369 "Linear scev computation is off in a bad way!");
4370 return SE.getConstant(ExitValue);
4371 } else if (isQuadratic()) {
4372 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4373 // quadratic equation to solve it. To do this, we must frame our problem in
4374 // terms of figuring out when zero is crossed, instead of when
4375 // Range.getUpper() is crossed.
4376 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4377 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4378 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4380 // Next, solve the constructed addrec
4381 std::pair<const SCEV *,const SCEV *> Roots =
4382 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4383 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4384 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4386 // Pick the smallest positive root value.
4387 if (ConstantInt *CB =
4388 dyn_cast<ConstantInt>(
4389 SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
4390 R1->getValue(), R2->getValue()))) {
4391 if (CB->getZExtValue() == false)
4392 std::swap(R1, R2); // R1 is the minimum root now.
4394 // Make sure the root is not off by one. The returned iteration should
4395 // not be in the range, but the previous one should be. When solving
4396 // for "X*X < 5", for example, we should not return a root of 2.
4397 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4398 R1->getValue(),
4399 SE);
4400 if (Range.contains(R1Val->getValue())) {
4401 // The next iteration must be out of the range...
4402 ConstantInt *NextVal =
4403 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
4405 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4406 if (!Range.contains(R1Val->getValue()))
4407 return SE.getConstant(NextVal);
4408 return SE.getCouldNotCompute(); // Something strange happened
4409 }
4411 // If R1 was not in the range, then it is a good return value. Make
4412 // sure that R1-1 WAS in the range though, just in case.
4413 ConstantInt *NextVal =
4414 SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
4415 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4416 if (Range.contains(R1Val->getValue()))
4417 return R1;
4418 return SE.getCouldNotCompute(); // Something strange happened
4419 }
4420 }
4423 return SE.getCouldNotCompute();
4424 }
4428 //===----------------------------------------------------------------------===//
4429 // SCEVCallbackVH Class Implementation
4430 //===----------------------------------------------------------------------===//
4432 void ScalarEvolution::SCEVCallbackVH::deleted() {
4433 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4434 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4435 SE->ConstantEvolutionLoopExitValue.erase(PN);
4436 if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4437 SE->ValuesAtScopes.erase(I);
4438 SE->Scalars.erase(getValPtr());
4439 // this now dangles!
4440 }
4442 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4443 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4445 // Forget all the expressions associated with users of the old value,
4446 // so that future queries will recompute the expressions using the new
4447 // value.
4448 SmallVector<User *, 16> Worklist;
4449 Value *Old = getValPtr();
4450 bool DeleteOld = false;
4451 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4452 UI != UE; ++UI)
4453 Worklist.push_back(*UI);
4454 while (!Worklist.empty()) {
4455 User *U = Worklist.pop_back_val();
4456 // Deleting the Old value will cause this to dangle. Postpone
4457 // that until everything else is done.
4458 if (U == Old) {
4459 DeleteOld = true;
4460 continue;
4461 }
4462 if (PHINode *PN = dyn_cast<PHINode>(U))
4463 SE->ConstantEvolutionLoopExitValue.erase(PN);
4464 if (Instruction *I = dyn_cast<Instruction>(U))
4465 SE->ValuesAtScopes.erase(I);
4466 if (SE->Scalars.erase(U))
4467 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4468 UI != UE; ++UI)
4469 Worklist.push_back(*UI);
4470 }
4471 if (DeleteOld) {
4472 if (PHINode *PN = dyn_cast<PHINode>(Old))
4473 SE->ConstantEvolutionLoopExitValue.erase(PN);
4474 if (Instruction *I = dyn_cast<Instruction>(Old))
4475 SE->ValuesAtScopes.erase(I);
4476 SE->Scalars.erase(Old);
4477 // this now dangles!
4478 }
4479 }
4482 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4483 : CallbackVH(V), SE(se) {}
4485 //===----------------------------------------------------------------------===//
4486 // ScalarEvolution Class Implementation
4487 //===----------------------------------------------------------------------===//
4489 ScalarEvolution::ScalarEvolution()
4490 : FunctionPass(&ID) {
4491 }
4493 bool ScalarEvolution::runOnFunction(Function &F) {
4494 this->F = &F;
4495 LI = &getAnalysis<LoopInfo>();
4496 TD = getAnalysisIfAvailable<TargetData>();
4497 return false;
4498 }
4500 void ScalarEvolution::releaseMemory() {
4501 Scalars.clear();
4502 BackedgeTakenCounts.clear();
4503 ConstantEvolutionLoopExitValue.clear();
4504 ValuesAtScopes.clear();
4505 UniqueSCEVs.clear();
4506 SCEVAllocator.Reset();
4507 }
4509 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4510 AU.setPreservesAll();
4511 AU.addRequiredTransitive<LoopInfo>();
4512 }
4514 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4515 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4516 }
4518 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4519 const Loop *L) {
4520 // Print all inner loops first
4521 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4522 PrintLoopInfo(OS, SE, *I);
4524 OS << "Loop " << L->getHeader()->getName() << ": ";
4526 SmallVector<BasicBlock*, 8> ExitBlocks;
4527 L->getExitBlocks(ExitBlocks);
4528 if (ExitBlocks.size() != 1)
4529 OS << "<multiple exits> ";
4531 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4532 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4533 } else {
4534 OS << "Unpredictable backedge-taken count. ";
4535 }
4537 OS << "\n";
4538 OS << "Loop " << L->getHeader()->getName() << ": ";
4540 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4541 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4542 } else {
4543 OS << "Unpredictable max backedge-taken count. ";
4544 }
4546 OS << "\n";
4547 }
4549 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4550 // ScalarEvolution's implementation of the print method is to print
4551 // out SCEV values of all instructions that are interesting. Doing
4552 // this potentially causes it to create new SCEV objects though,
4553 // which technically conflicts with the const qualifier. This isn't
4554 // observable from outside the class though, so casting away the
4555 // const isn't dangerous.
4556 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4558 OS << "Classifying expressions for: " << F->getName() << "\n";
4559 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4560 if (isSCEVable(I->getType())) {
4561 OS << *I;
4562 OS << " --> ";
4563 const SCEV *SV = SE.getSCEV(&*I);
4564 SV->print(OS);
4566 const Loop *L = LI->getLoopFor((*I).getParent());
4568 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
4569 if (AtUse != SV) {
4570 OS << " --> ";
4571 AtUse->print(OS);
4572 }
4574 if (L) {
4575 OS << "\t\t" "Exits: ";
4576 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4577 if (!ExitValue->isLoopInvariant(L)) {
4578 OS << "<<Unknown>>";
4579 } else {
4580 OS << *ExitValue;
4581 }
4582 }
4584 OS << "\n";
4585 }
4587 OS << "Determining loop execution counts for: " << F->getName() << "\n";
4588 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
4589 PrintLoopInfo(OS, &SE, *I);
4590 }
4592 void ScalarEvolution::print(std::ostream &o, const Module *M) const {
4593 raw_os_ostream OS(o);
4594 print(OS, M);
4595 }