1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the scalar evolution analysis
11 // engine, which is used primarily to analyze expressions involving induction
12 // variables in loops.
14 // There are several aspects to this library. First is the representation of
15 // scalar expressions, which are represented as subclasses of the SCEV class.
16 // These classes are used to represent certain types of subexpressions that we
// can handle. These classes are reference counted, managed via const SCEV*
// pointers. We only create one SCEV of a particular shape, so pointer comparisons
19 // for equality are legal.
21 // One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
23 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
24 // recurrence) then we represent it directly as a recurrence node, otherwise we
25 // represent it as a SCEVUnknown node.
27 // In addition to being able to represent expressions of various types, we also
28 // have folders that are used to build the *canonical* representation for a
29 // particular expression. These folders are capable of using a variety of
30 // rewrite rules to simplify the expressions.
32 // Once the folders are defined, we can implement the more interesting
33 // higher-level code, such as the code that recognizes PHI nodes of various
34 // types, computes the execution count of a loop, etc.
36 // TODO: We should use these routines and value representations to implement
37 // dependence analysis!
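//
// As a brief illustration of the representation (not tied to any particular
// code in this file): for a loop such as
//
//   for (int i = 0; i != n; ++i)
//     A[3*i + 7] = 0;
//
// the value of "i" is described by the recurrence {0,+,1}<loop> and the value
// of "3*i + 7" by {7,+,3}<loop>, i.e. expressions that start at 0 (resp. 7)
// and advance by 1 (resp. 3) on every iteration. This {Start,+,Step}<loop>
// notation is the same one the print routines below emit.
//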
39 //===----------------------------------------------------------------------===//
41 // There are several good references for the techniques used in this analysis.
43 // Chains of recurrences -- a method to expedite the evaluation
44 // of closed-form functions
45 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima
// On computational properties of chains of recurrences
// Eugene V. Zima
50 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization
51 // Robert A. van Engelen
53 // Efficient Symbolic Analysis for Optimizing Compilers
54 // Robert A. van Engelen
56 // Using the chains of recurrences algebra for data dependence testing and
57 // induction variable substitution
58 // MS Thesis, Johnie Birch
60 //===----------------------------------------------------------------------===//
62 #define DEBUG_TYPE "scalar-evolution"
63 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
64 #include "llvm/Constants.h"
65 #include "llvm/DerivedTypes.h"
66 #include "llvm/GlobalVariable.h"
67 #include "llvm/Instructions.h"
68 #include "llvm/Analysis/ConstantFolding.h"
69 #include "llvm/Analysis/Dominators.h"
70 #include "llvm/Analysis/LoopInfo.h"
71 #include "llvm/Analysis/ValueTracking.h"
72 #include "llvm/Assembly/Writer.h"
73 #include "llvm/Target/TargetData.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/Compiler.h"
76 #include "llvm/Support/ConstantRange.h"
77 #include "llvm/Support/GetElementPtrTypeIterator.h"
78 #include "llvm/Support/InstIterator.h"
79 #include "llvm/Support/MathExtras.h"
80 #include "llvm/Support/raw_ostream.h"
81 #include "llvm/ADT/Statistic.h"
82 #include "llvm/ADT/STLExtras.h"
86 STATISTIC(NumArrayLenItCounts,
87 "Number of trip counts computed with array length");
88 STATISTIC(NumTripCountsComputed,
89 "Number of loops with predictable loop counts");
90 STATISTIC(NumTripCountsNotComputed,
91 "Number of loops without predictable loop counts");
92 STATISTIC(NumBruteForceTripCountsComputed,
93 "Number of loops with trip counts computed by force");
95 static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));
102 static RegisterPass<ScalarEvolution>
103 R("scalar-evolution", "Scalar Evolution Analysis", false, true);
104 char ScalarEvolution::ID = 0;
106 //===----------------------------------------------------------------------===//
107 // SCEV class definitions
108 //===----------------------------------------------------------------------===//
110 //===----------------------------------------------------------------------===//
111 // Implementation of the SCEV class.
114 void SCEV::dump() const {
119 void SCEV::print(std::ostream &o) const {
120 raw_os_ostream OS(o);
124 bool SCEV::isZero() const {
125 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
126 return SC->getValue()->isZero();
130 bool SCEV::isOne() const {
131 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
132 return SC->getValue()->isOne();
136 bool SCEV::isAllOnesValue() const {
137 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
138 return SC->getValue()->isAllOnesValue();
142 SCEVCouldNotCompute::SCEVCouldNotCompute() :
143 SCEV(scCouldNotCompute) {}
145 bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
146 assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
150 const Type *SCEVCouldNotCompute::getType() const {
151 assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
155 bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
156 assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
161 SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
164 ScalarEvolution &SE) const {
168 void SCEVCouldNotCompute::print(raw_ostream &OS) const {
169 OS << "***COULDNOTCOMPUTE***";
172 bool SCEVCouldNotCompute::classof(const SCEV *S) {
173 return S->getSCEVType() == scCouldNotCompute;
176 const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
177 SCEVConstant *&R = SCEVConstants[V];
178 if (R == 0) R = new SCEVConstant(V);
182 const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
183 return getConstant(ConstantInt::get(Val));
187 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
188 return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
191 const Type *SCEVConstant::getType() const { return V->getType(); }
193 void SCEVConstant::print(raw_ostream &OS) const {
194 WriteAsOperand(OS, V, false);
197 SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
198 const SCEV* op, const Type *ty)
199 : SCEV(SCEVTy), Op(op), Ty(ty) {}
201 bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
202 return Op->dominates(BB, DT);
205 SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty)
206 : SCEVCastExpr(scTruncate, op, ty) {
207 assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
208 (Ty->isInteger() || isa<PointerType>(Ty)) &&
209 "Cannot truncate non-integer value!");
212 void SCEVTruncateExpr::print(raw_ostream &OS) const {
213 OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
216 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty)
217 : SCEVCastExpr(scZeroExtend, op, ty) {
218 assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
219 (Ty->isInteger() || isa<PointerType>(Ty)) &&
220 "Cannot zero extend non-integer value!");
223 void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
224 OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
227 SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty)
228 : SCEVCastExpr(scSignExtend, op, ty) {
229 assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
230 (Ty->isInteger() || isa<PointerType>(Ty)) &&
231 "Cannot sign extend non-integer value!");
234 void SCEVSignExtendExpr::print(raw_ostream &OS) const {
235 OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
238 void SCEVCommutativeExpr::print(raw_ostream &OS) const {
239 assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
240 const char *OpStr = getOperationStr();
241 OS << "(" << *Operands[0];
242 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
243 OS << OpStr << *Operands[i];
248 SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
251 ScalarEvolution &SE) const {
252 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
254 getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
255 if (H != getOperand(i)) {
256 SmallVector<const SCEV*, 8> NewOps;
257 NewOps.reserve(getNumOperands());
258 for (unsigned j = 0; j != i; ++j)
259 NewOps.push_back(getOperand(j));
261 for (++i; i != e; ++i)
262 NewOps.push_back(getOperand(i)->
263 replaceSymbolicValuesWithConcrete(Sym, Conc, SE));
265 if (isa<SCEVAddExpr>(this))
266 return SE.getAddExpr(NewOps);
267 else if (isa<SCEVMulExpr>(this))
268 return SE.getMulExpr(NewOps);
269 else if (isa<SCEVSMaxExpr>(this))
270 return SE.getSMaxExpr(NewOps);
271 else if (isa<SCEVUMaxExpr>(this))
272 return SE.getUMaxExpr(NewOps);
274 assert(0 && "Unknown commutative expr!");
280 bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
281 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
282 if (!getOperand(i)->dominates(BB, DT))
288 bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
289 return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
292 void SCEVUDivExpr::print(raw_ostream &OS) const {
293 OS << "(" << *LHS << " /u " << *RHS << ")";
296 const Type *SCEVUDivExpr::getType() const {
297 // In most cases the types of LHS and RHS will be the same, but in some
298 // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
299 // depend on the type for correctness, but handling types carefully can
300 // avoid extra casts in the SCEVExpander. The LHS is more likely to be
301 // a pointer type than the RHS, so use the RHS' type here.
302 return RHS->getType();
306 SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
308 ScalarEvolution &SE) const {
309 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
311 getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
312 if (H != getOperand(i)) {
313 SmallVector<const SCEV*, 8> NewOps;
314 NewOps.reserve(getNumOperands());
315 for (unsigned j = 0; j != i; ++j)
316 NewOps.push_back(getOperand(j));
318 for (++i; i != e; ++i)
319 NewOps.push_back(getOperand(i)->
320 replaceSymbolicValuesWithConcrete(Sym, Conc, SE));
322 return SE.getAddRecExpr(NewOps, L);
329 bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
// This recurrence is invariant w.r.t. QueryLoop iff QueryLoop doesn't
331 // contain L and if the start is invariant.
332 // Add recurrences are never invariant in the function-body (null loop).
return QueryLoop &&
       !QueryLoop->contains(L->getHeader()) &&
335 getOperand(0)->isLoopInvariant(QueryLoop);
339 void SCEVAddRecExpr::print(raw_ostream &OS) const {
340 OS << "{" << *Operands[0];
341 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
342 OS << ",+," << *Operands[i];
343 OS << "}<" << L->getHeader()->getName() + ">";
346 bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
347 // All non-instruction values are loop invariant. All instructions are loop
348 // invariant if they are not contained in the specified loop.
349 // Instructions are never considered invariant in the function body
350 // (null loop) because they are defined within the "loop".
351 if (Instruction *I = dyn_cast<Instruction>(V))
352 return L && !L->contains(I->getParent());
356 bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
357 if (Instruction *I = dyn_cast<Instruction>(getValue()))
358 return DT->dominates(I->getParent(), BB);
362 const Type *SCEVUnknown::getType() const {
366 void SCEVUnknown::print(raw_ostream &OS) const {
367 WriteAsOperand(OS, V, false);
370 //===----------------------------------------------------------------------===//
372 //===----------------------------------------------------------------------===//
375 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
378 class VISIBILITY_HIDDEN SCEVComplexityCompare {
381 explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}
383 bool operator()(const SCEV *LHS, const SCEV *RHS) const {
384 // Primarily, sort the SCEVs by their getSCEVType().
385 if (LHS->getSCEVType() != RHS->getSCEVType())
386 return LHS->getSCEVType() < RHS->getSCEVType();
388 // Aside from the getSCEVType() ordering, the particular ordering
389 // isn't very important except that it's beneficial to be consistent,
390 // so that (a + b) and (b + a) don't end up as different expressions.
392 // Sort SCEVUnknown values with some loose heuristics. TODO: This is
393 // not as complete as it could be.
394 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
395 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
// Order pointer values after integer values. This helps SCEVExpander
// form GEPs.
399 if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
401 if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
404 // Compare getValueID values.
405 if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
406 return LU->getValue()->getValueID() < RU->getValue()->getValueID();
408 // Sort arguments by their position.
409 if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
410 const Argument *RA = cast<Argument>(RU->getValue());
411 return LA->getArgNo() < RA->getArgNo();
414 // For instructions, compare their loop depth, and their opcode.
415 // This is pretty loose.
416 if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
417 Instruction *RV = cast<Instruction>(RU->getValue());
419 // Compare loop depths.
420 if (LI->getLoopDepth(LV->getParent()) !=
421 LI->getLoopDepth(RV->getParent()))
422 return LI->getLoopDepth(LV->getParent()) <
423 LI->getLoopDepth(RV->getParent());
426 if (LV->getOpcode() != RV->getOpcode())
427 return LV->getOpcode() < RV->getOpcode();
429 // Compare the number of operands.
430 if (LV->getNumOperands() != RV->getNumOperands())
431 return LV->getNumOperands() < RV->getNumOperands();
437 // Compare constant values.
438 if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
439 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
440 return LC->getValue()->getValue().ult(RC->getValue()->getValue());
443 // Compare addrec loop depths.
444 if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
445 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
446 if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
447 return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
450 // Lexicographically compare n-ary expressions.
451 if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
452 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
453 for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
454 if (i >= RC->getNumOperands())
456 if (operator()(LC->getOperand(i), RC->getOperand(i)))
458 if (operator()(RC->getOperand(i), LC->getOperand(i)))
461 return LC->getNumOperands() < RC->getNumOperands();
464 // Lexicographically compare udiv expressions.
465 if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
466 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
467 if (operator()(LC->getLHS(), RC->getLHS()))
469 if (operator()(RC->getLHS(), LC->getLHS()))
471 if (operator()(LC->getRHS(), RC->getRHS()))
473 if (operator()(RC->getRHS(), LC->getRHS()))
478 // Compare cast expressions by operand.
479 if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
480 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
481 return operator()(LC->getOperand(), RC->getOperand());
484 assert(0 && "Unknown SCEV kind!");
490 /// GroupByComplexity - Given a list of SCEV objects, order them by their
491 /// complexity, and group objects of the same complexity together by value.
492 /// When this routine is finished, we know that any duplicates in the vector are
493 /// consecutive and that complexity is monotonically increasing.
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// end up in memory.
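///
/// For example (illustrative): if, after the rough sort below, the operand
/// list is { 5, 10, %x, %y, %x } (constants first, then SCEVUnknowns that the
/// comparator considers equivalent), the grouping pass swaps the trailing %x
/// forward to produce { 5, 10, %x, %x, %y }, so callers can find duplicates
/// by examining adjacent elements only.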
500 static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
502 if (Ops.size() < 2) return; // Noop
503 if (Ops.size() == 2) {
504 // This is the common case, which also happens to be trivially simple.
506 if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
507 std::swap(Ops[0], Ops[1]);
511 // Do the rough sort by complexity.
512 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
514 // Now that we are sorted by complexity, group elements of the same
515 // complexity. Note that this is, at worst, N^2, but the vector is likely to
516 // be extremely short in practice. Note that we take this approach because we
517 // do not want to depend on the addresses of the objects we are grouping.
518 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
519 const SCEV *S = Ops[i];
520 unsigned Complexity = S->getSCEVType();
522 // If there are any objects of the same complexity and same value as this
524 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
525 if (Ops[j] == S) { // Found a duplicate.
526 // Move it to immediately after i'th element.
527 std::swap(Ops[i+1], Ops[j]);
528 ++i; // no need to rescan it.
529 if (i == e-2) return; // Done!
537 //===----------------------------------------------------------------------===//
538 // Simple SCEV method implementations
539 //===----------------------------------------------------------------------===//
541 /// BinomialCoefficient - Compute BC(It, K). The result has width W.
543 static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
545 const Type* ResultTy) {
546 // Handle the simplest case efficiently.
if (K == 1)
  return SE.getTruncateOrZeroExtend(It, ResultTy);
550 // We are using the following formula for BC(It, K):
552 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
// Suppose W is the bitwidth of the return value. We must be prepared for
// overflow. Hence, we must ensure that the result of our computation is
556 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
557 // safe in modular arithmetic.
// However, this code doesn't use exactly that formula; the formula it uses
// is something like the following, where T is the number of factors of 2 in
// K! (i.e. trailing zeros in the binary representation of K!):
//
//   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
566 // This formula is trivially equivalent to the previous formula. However,
567 // this formula can be implemented much more efficiently. The trick is that
568 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
569 // arithmetic. To do exact division in modular arithmetic, all we have
// to do is multiply by the inverse. Therefore, this step can be done at
// width W.
573 // The next issue is how to safely do the division by 2^T. The way this
574 // is done is by doing the multiplication step at a width of at least W + T
575 // bits. This way, the bottom W+T bits of the product are accurate. Then,
576 // when we perform the division by 2^T (which is equivalent to a right shift
577 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
578 // truncated out after the division by 2^T.
580 // In comparison to just directly using the first formula, this technique
581 // is much more efficient; using the first formula requires W * K bits,
// but this formula needs less than W + K bits. Also, the first formula requires
583 // a division step, whereas this formula only requires multiplies and shifts.
585 // It doesn't matter whether the subtraction step is done in the calculation
586 // width or the input iteration count's width; if the subtraction overflows,
587 // the result must be zero anyway. We prefer here to do it in the width of
588 // the induction variable because it helps a lot for certain cases; CodeGen
589 // isn't smart enough to ignore the overflow, which leads to much less
// efficient code if the width of the subtraction is wider than the native
// register width.
593 // (It's possible to not widen at all by pulling out factors of 2 before
594 // the multiplication; for example, K=2 can be calculated as
595 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
596 // extra arithmetic, so it's not an obvious win, and it gets
597 // much more complicated for K > 3.)
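//
// Worked example (illustrative): for K = 3 and W = 32, K! = 6 = 2^1 * 3, so
// T = 1 and the odd part of K! is 3. The multiplicative inverse of 3 modulo
// 2^32 is 0xAAAAAAAB, since 3 * 0xAAAAAAAB = 2^33 + 1 == 1 (mod 2^32). The
// product It*(It-1)*(It-2) is therefore computed at width W + T = 33 bits,
// udiv'd by 2^T = 2, truncated back to 32 bits, and finally multiplied by
// 0xAAAAAAAB to perform the exact division by the odd factor 3.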
599 // Protection from insane SCEVs; this bound is conservative,
600 // but it probably doesn't matter.
602 return SE.getCouldNotCompute();
604 unsigned W = SE.getTypeSizeInBits(ResultTy);
606 // Calculate K! / 2^T and T; we divide out the factors of two before
// multiplying, when calculating K! / 2^T, to avoid overflow.
608 // Other overflow doesn't matter because we only care about the bottom
609 // W bits of the result.
APInt OddFactorial(W, 1);
unsigned T = 1;
for (unsigned i = 3; i <= K; ++i) {
  APInt Mult(W, i);
  unsigned TwoFactors = Mult.countTrailingZeros();
  T += TwoFactors;
  Mult = Mult.lshr(TwoFactors);
  OddFactorial *= Mult;
}
620 // We need at least W + T bits for the multiplication step
621 unsigned CalculationBits = W + T;
// Calculate 2^T, at width T+W.
624 APInt DivFactor = APInt(CalculationBits, 1).shl(T);
626 // Calculate the multiplicative inverse of K! / 2^T;
// this multiplication factor will perform the exact division by
// K! / 2^T.
629 APInt Mod = APInt::getSignedMinValue(W+1);
630 APInt MultiplyFactor = OddFactorial.zext(W+1);
631 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
632 MultiplyFactor = MultiplyFactor.trunc(W);
634 // Calculate the product, at width T+W
635 const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
636 const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
637 for (unsigned i = 1; i != K; ++i) {
638 const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
639 Dividend = SE.getMulExpr(Dividend,
640 SE.getTruncateOrZeroExtend(S, CalculationTy));
644 const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
646 // Truncate the result, and divide by K! / 2^T.
648 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
649 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
652 /// evaluateAtIteration - Return the value of this chain of recurrences at
653 /// the specified iteration number. We can evaluate this recurrence by
654 /// multiplying each element in the chain by the binomial coefficient
655 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
657 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
659 /// where BC(It, k) stands for binomial coefficient.
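///
/// For example (illustrative), the recurrence {0,+,1,+,1}, whose step is
/// itself the recurrence {1,+,1} = 1, 2, 3, ..., evaluates at iteration It to
///   0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2 = It*(It+1)/2,
/// i.e. the running sum 1 + 2 + ... + It.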
661 const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
662 ScalarEvolution &SE) const {
663 const SCEV* Result = getStart();
664 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
665 // The computation is correct in the face of overflow provided that the
// multiplication is performed _after_ the evaluation of the binomial
// coefficient.
668 const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
669 if (isa<SCEVCouldNotCompute>(Coeff))
672 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
677 //===----------------------------------------------------------------------===//
678 // SCEV Expression folder implementations
679 //===----------------------------------------------------------------------===//
681 const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
683 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
684 "This is not a truncating conversion!");
685 assert(isSCEVable(Ty) &&
686 "This is not a conversion to a SCEVable type!");
687 Ty = getEffectiveSCEVType(Ty);
689 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
  cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
693 // trunc(trunc(x)) --> trunc(x)
694 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
695 return getTruncateExpr(ST->getOperand(), Ty);
697 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
698 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
699 return getTruncateOrSignExtend(SS->getOperand(), Ty);
701 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
702 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
703 return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
705 // If the input value is a chrec scev, truncate the chrec's operands.
706 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
707 SmallVector<const SCEV*, 4> Operands;
708 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
709 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
710 return getAddRecExpr(Operands, AddRec->getLoop());
713 SCEVTruncateExpr *&Result = SCEVTruncates[std::make_pair(Op, Ty)];
714 if (Result == 0) Result = new SCEVTruncateExpr(Op, Ty);
718 const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
720 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
721 "This is not an extending conversion!");
722 assert(isSCEVable(Ty) &&
723 "This is not a conversion to a SCEVable type!");
724 Ty = getEffectiveSCEVType(Ty);
726 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
727 const Type *IntTy = getEffectiveSCEVType(Ty);
728 Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
729 if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
730 return getConstant(cast<ConstantInt>(C));
733 // zext(zext(x)) --> zext(x)
734 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
735 return getZeroExtendExpr(SZ->getOperand(), Ty);
737 // If the input value is a chrec scev, and we can prove that the value
738 // did not overflow the old, smaller, value, we can zero extend all of the
739 // operands (often constants). This allows analysis of something like
740 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
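//
// In that example (illustrative), X is the i8 recurrence {0,+,1}<loop>;
// because the loop runs at most 100 times the value never wraps i8, so
// (zext i8 {0,+,1} to i32) can be rewritten as the i32 recurrence {0,+,1},
// and Y can then be analyzed directly as an i32 induction variable.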
741 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
742 if (AR->isAffine()) {
743 // Check whether the backedge-taken count is SCEVCouldNotCompute.
744 // Note that this serves two purposes: It filters out loops that are
745 // simply not analyzable, and it covers the case where this code is
746 // being called from within backedge-taken count analysis, such that
747 // attempting to ask for the backedge-taken count would likely result
// in infinite recursion. In the latter case, the analysis code will
749 // cope with a conservative value, and it will take care to purge
750 // that value once it has finished.
751 const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
752 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
// Manually compute the final value for AR, checking for
// overflow.
755 const SCEV* Start = AR->getStart();
756 const SCEV* Step = AR->getStepRecurrence(*this);
// Check whether the backedge-taken count can be losslessly cast to
759 // the addrec's type. The count is always unsigned.
760 const SCEV* CastedMaxBECount =
761 getTruncateOrZeroExtend(MaxBECount, Start->getType());
762 const SCEV* RecastedMaxBECount =
763 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
764 if (MaxBECount == RecastedMaxBECount) {
const Type *WideTy =
  IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
767 // Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV* ZMul =
  getMulExpr(CastedMaxBECount,
770 getTruncateOrZeroExtend(Step, Start->getType()));
771 const SCEV* Add = getAddExpr(Start, ZMul);
772 const SCEV* OperandExtendedAdd =
773 getAddExpr(getZeroExtendExpr(Start, WideTy),
774 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
775 getZeroExtendExpr(Step, WideTy)));
776 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
777 // Return the expression with the addrec on the outside.
778 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
779 getZeroExtendExpr(Step, Ty),
782 // Similar to above, only this time treat the step value as signed.
783 // This covers loops that count down.
785 getMulExpr(CastedMaxBECount,
786 getTruncateOrSignExtend(Step, Start->getType()));
787 Add = getAddExpr(Start, SMul);
789 getAddExpr(getZeroExtendExpr(Start, WideTy),
790 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
791 getSignExtendExpr(Step, WideTy)));
792 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
793 // Return the expression with the addrec on the outside.
794 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
795 getSignExtendExpr(Step, Ty),
801 SCEVZeroExtendExpr *&Result = SCEVZeroExtends[std::make_pair(Op, Ty)];
802 if (Result == 0) Result = new SCEVZeroExtendExpr(Op, Ty);
806 const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
808 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
809 "This is not an extending conversion!");
810 assert(isSCEVable(Ty) &&
811 "This is not a conversion to a SCEVable type!");
812 Ty = getEffectiveSCEVType(Ty);
814 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
815 const Type *IntTy = getEffectiveSCEVType(Ty);
816 Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
817 if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
818 return getConstant(cast<ConstantInt>(C));
821 // sext(sext(x)) --> sext(x)
822 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
823 return getSignExtendExpr(SS->getOperand(), Ty);
825 // If the input value is a chrec scev, and we can prove that the value
826 // did not overflow the old, smaller, value, we can sign extend all of the
827 // operands (often constants). This allows analysis of something like
828 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
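//
// As in the zero-extend case above (illustrative), X here is the i8
// recurrence {0,+,1}<loop>; since it stays within [0, 100) it never overflows
// as a signed i8, so (sext i8 {0,+,1} to i32) becomes the i32 recurrence
// {0,+,1}.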
829 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
830 if (AR->isAffine()) {
831 // Check whether the backedge-taken count is SCEVCouldNotCompute.
832 // Note that this serves two purposes: It filters out loops that are
833 // simply not analyzable, and it covers the case where this code is
834 // being called from within backedge-taken count analysis, such that
835 // attempting to ask for the backedge-taken count would likely result
// in infinite recursion. In the latter case, the analysis code will
837 // cope with a conservative value, and it will take care to purge
838 // that value once it has finished.
839 const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
840 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
// Manually compute the final value for AR, checking for
// overflow.
843 const SCEV* Start = AR->getStart();
844 const SCEV* Step = AR->getStepRecurrence(*this);
// Check whether the backedge-taken count can be losslessly cast to
847 // the addrec's type. The count is always unsigned.
848 const SCEV* CastedMaxBECount =
849 getTruncateOrZeroExtend(MaxBECount, Start->getType());
850 const SCEV* RecastedMaxBECount =
851 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
852 if (MaxBECount == RecastedMaxBECount) {
854 IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
855 // Check whether Start+Step*MaxBECount has no signed overflow.
857 getMulExpr(CastedMaxBECount,
858 getTruncateOrSignExtend(Step, Start->getType()));
859 const SCEV* Add = getAddExpr(Start, SMul);
860 const SCEV* OperandExtendedAdd =
861 getAddExpr(getSignExtendExpr(Start, WideTy),
862 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
863 getSignExtendExpr(Step, WideTy)));
864 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
865 // Return the expression with the addrec on the outside.
866 return getAddRecExpr(getSignExtendExpr(Start, Ty),
867 getSignExtendExpr(Step, Ty),
873 SCEVSignExtendExpr *&Result = SCEVSignExtends[std::make_pair(Op, Ty)];
874 if (Result == 0) Result = new SCEVSignExtendExpr(Op, Ty);
878 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
879 /// unspecified bits out to the given type.
881 const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
883 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
884 "This is not an extending conversion!");
885 assert(isSCEVable(Ty) &&
886 "This is not a conversion to a SCEVable type!");
887 Ty = getEffectiveSCEVType(Ty);
889 // Sign-extend negative constants.
890 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
891 if (SC->getValue()->getValue().isNegative())
892 return getSignExtendExpr(Op, Ty);
894 // Peel off a truncate cast.
895 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
896 const SCEV* NewOp = T->getOperand();
897 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
898 return getAnyExtendExpr(NewOp, Ty);
899 return getTruncateOrNoop(NewOp, Ty);
902 // Next try a zext cast. If the cast is folded, use it.
903 const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
if (!isa<SCEVZeroExtendExpr>(ZExt))
  return ZExt;
907 // Next try a sext cast. If the cast is folded, use it.
908 const SCEV* SExt = getSignExtendExpr(Op, Ty);
if (!isa<SCEVSignExtendExpr>(SExt))
  return SExt;
912 // If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
  return SExt;

// Absent any other information, use the zext cast value.
return ZExt;
920 /// CollectAddOperandsWithScales - Process the given Ops list, which is
921 /// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
923 /// what it does, given a sequence of operands that would form an add
924 /// expression like this:
926 /// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
928 /// where A and B are constants, update the map with these values:
930 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
932 /// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
935 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
937 /// This form often exposes folding opportunities that are hidden in
938 /// the original operand list.
940 /// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
942 /// the common case where no interesting opportunities are present, and
943 /// is also used as a check to avoid infinite recursion.
946 CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
947 SmallVector<const SCEV*, 8> &NewOps,
948 APInt &AccumulatedConstant,
949 const SmallVectorImpl<const SCEV*> &Ops,
951 ScalarEvolution &SE) {
952 bool Interesting = false;
954 // Iterate over the add operands.
955 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
956 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
957 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
959 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
960 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
961 // A multiplication of a constant with another add; recurse.
963 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
964 cast<SCEVAddExpr>(Mul->getOperand(1))
// A multiplication of a constant with some other value. Update
// the map.
970 SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
971 const SCEV* Key = SE.getMulExpr(MulOps);
972 std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
973 M.insert(std::make_pair(Key, APInt()));
975 Pair.first->second = NewScale;
976 NewOps.push_back(Pair.first->first);
978 Pair.first->second += NewScale;
979 // The map already had an entry for this value, which may indicate
980 // a folding opportunity.
984 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
985 // Pull a buried constant out to the outside.
986 if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
988 AccumulatedConstant += Scale * C->getValue()->getValue();
990 // An ordinary operand. Update the map.
991 std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
992 M.insert(std::make_pair(Ops[i], APInt()));
994 Pair.first->second = Scale;
995 NewOps.push_back(Pair.first->first);
997 Pair.first->second += Scale;
998 // The map already had an entry for this value, which may indicate
999 // a folding opportunity.
1009 struct APIntCompare {
1010 bool operator()(const APInt &LHS, const APInt &RHS) const {
1011 return LHS.ult(RHS);
/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
1018 const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
1019 assert(!Ops.empty() && "Cannot get empty add!");
1020 if (Ops.size() == 1) return Ops[0];
1022 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1023 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1024 getEffectiveSCEVType(Ops[0]->getType()) &&
1025 "SCEVAddExpr operand types don't match!");
1028 // Sort by complexity, this groups all similar expression types together.
1029 GroupByComplexity(Ops, LI);
1031 // If there are any constants, fold them together.
1033 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1035 assert(Idx < Ops.size());
1036 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1037 // We found two constants, fold them together!
1038 Ops[0] = getConstant(LHSC->getValue()->getValue() +
1039 RHSC->getValue()->getValue());
1040 if (Ops.size() == 2) return Ops[0];
1041 Ops.erase(Ops.begin()+1); // Erase the folded element
1042 LHSC = cast<SCEVConstant>(Ops[0]);
1045 // If we are left with a constant zero being added, strip it off.
1046 if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1047 Ops.erase(Ops.begin());
1052 if (Ops.size() == 1) return Ops[0];
1054 // Okay, check to see if the same value occurs in the operand list twice. If
// so, merge them together into a multiply expression. Since we sorted the
1056 // list, these values are required to be adjacent.
1057 const Type *Ty = Ops[0]->getType();
1058 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1059 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
1060 // Found a match, merge the two values into a multiply, and add any
1061 // remaining values to the result.
1062 const SCEV* Two = getIntegerSCEV(2, Ty);
1063 const SCEV* Mul = getMulExpr(Ops[i], Two);
1064 if (Ops.size() == 2)
1066 Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
1068 return getAddExpr(Ops);
1071 // Check for truncates. If all the operands are truncated from the same
1072 // type, see if factoring out the truncate would permit the result to be
// folded. E.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1074 // if the contents of the resulting outer trunc fold to something simple.
1075 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1076 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1077 const Type *DstType = Trunc->getType();
1078 const Type *SrcType = Trunc->getOperand()->getType();
1079 SmallVector<const SCEV*, 8> LargeOps;
1081 // Check all the operands to see if they can be represented in the
1082 // source type of the truncate.
1083 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1084 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1085 if (T->getOperand()->getType() != SrcType) {
1089 LargeOps.push_back(T->getOperand());
1090 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1091 // This could be either sign or zero extension, but sign extension
1092 // is much more likely to be foldable here.
1093 LargeOps.push_back(getSignExtendExpr(C, SrcType));
1094 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1095 SmallVector<const SCEV*, 8> LargeMulOps;
1096 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1097 if (const SCEVTruncateExpr *T =
1098 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1099 if (T->getOperand()->getType() != SrcType) {
1103 LargeMulOps.push_back(T->getOperand());
1104 } else if (const SCEVConstant *C =
1105 dyn_cast<SCEVConstant>(M->getOperand(j))) {
1106 // This could be either sign or zero extension, but sign extension
1107 // is much more likely to be foldable here.
1108 LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
1115 LargeOps.push_back(getMulExpr(LargeMulOps));
1122 // Evaluate the expression in the larger type.
1123 const SCEV* Fold = getAddExpr(LargeOps);
1124 // If it folds to something simple, use it. Otherwise, don't.
1125 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1126 return getTruncateExpr(Fold, DstType);
1130 // Skip past any other cast SCEVs.
1131 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1134 // If there are add operands they would be next.
1135 if (Idx < Ops.size()) {
1136 bool DeletedAdd = false;
1137 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
// If we have an add, expand the add operands onto the end of the operands
// list.
1140 Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
1141 Ops.erase(Ops.begin()+Idx);
1145 // If we deleted at least one add, we added operands to the end of the list,
1146 // and they are not necessarily sorted. Recurse to resort and resimplify
// any operands we just acquired.
1149 return getAddExpr(Ops);
1152 // Skip over the add expression until we get to a multiply.
1153 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1156 // Check to see if there are any folding opportunities present with
1157 // operands multiplied by constant values.
1158 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1159 uint64_t BitWidth = getTypeSizeInBits(Ty);
1160 DenseMap<const SCEV*, APInt> M;
1161 SmallVector<const SCEV*, 8> NewOps;
1162 APInt AccumulatedConstant(BitWidth, 0);
1163 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1164 Ops, APInt(BitWidth, 1), *this)) {
// Some interesting folding opportunity is present, so it's worthwhile to
1166 // re-generate the operands list. Group the operands by constant scale,
1167 // to avoid multiplying by the same constant scale multiple times.
1168 std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
1169 for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
1170 E = NewOps.end(); I != E; ++I)
1171 MulOpLists[M.find(*I)->second].push_back(*I);
// Re-generate the operands list.
Ops.clear();
1174 if (AccumulatedConstant != 0)
1175 Ops.push_back(getConstant(AccumulatedConstant));
1176 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1177 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1179 Ops.push_back(getMulExpr(getConstant(I->first),
1180 getAddExpr(I->second)));
if (Ops.empty())
  return getIntegerSCEV(0, Ty);
if (Ops.size() == 1)
  return Ops[0];
return getAddExpr(Ops);
1189 // If we are adding something to a multiply expression, make sure the
// something is not already an operand of the multiply. If so, merge it into
// the multiply.
1192 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1193 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1194 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1195 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1196 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1197 if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1198 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
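// Note that the boolean (MulOp == 0) below is used as an operand index:
// it selects the *other* operand of a two-operand multiply (operand 1 when
// MulOp is 0, operand 0 otherwise); multiplies with more than two operands
// are handled by the branch just below it.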
1199 const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
1200 if (Mul->getNumOperands() != 2) {
1201 // If the multiply has more than two operands, we must get the
1203 SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
1204 MulOps.erase(MulOps.begin()+MulOp);
1205 InnerMul = getMulExpr(MulOps);
1207 const SCEV* One = getIntegerSCEV(1, Ty);
1208 const SCEV* AddOne = getAddExpr(InnerMul, One);
1209 const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1210 if (Ops.size() == 2) return OuterMul;
1212 Ops.erase(Ops.begin()+AddOp);
1213 Ops.erase(Ops.begin()+Idx-1);
1215 Ops.erase(Ops.begin()+Idx);
1216 Ops.erase(Ops.begin()+AddOp-1);
1218 Ops.push_back(OuterMul);
1219 return getAddExpr(Ops);
1222 // Check this multiply against other multiplies being added together.
1223 for (unsigned OtherMulIdx = Idx+1;
1224 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1226 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1227 // If MulOp occurs in OtherMul, we can fold the two multiplies
1229 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1230 OMulOp != e; ++OMulOp)
1231 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1232 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1233 const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
1234 if (Mul->getNumOperands() != 2) {
1235 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1237 MulOps.erase(MulOps.begin()+MulOp);
1238 InnerMul1 = getMulExpr(MulOps);
1240 const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1241 if (OtherMul->getNumOperands() != 2) {
1242 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1243 OtherMul->op_end());
1244 MulOps.erase(MulOps.begin()+OMulOp);
1245 InnerMul2 = getMulExpr(MulOps);
1247 const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1248 const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1249 if (Ops.size() == 2) return OuterMul;
1250 Ops.erase(Ops.begin()+Idx);
1251 Ops.erase(Ops.begin()+OtherMulIdx-1);
1252 Ops.push_back(OuterMul);
1253 return getAddExpr(Ops);
1259 // If there are any add recurrences in the operands list, see if any other
// added values are loop invariant. If so, we can fold them into the
// recurrence.
1262 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1265 // Scan over all recurrences, trying to fold loop invariants into them.
1266 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1267 // Scan all of the other operands to this add and add them to the vector if
1268 // they are loop invariant w.r.t. the recurrence.
1269 SmallVector<const SCEV*, 8> LIOps;
1270 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1271 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1272 if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1273 LIOps.push_back(Ops[i]);
1274 Ops.erase(Ops.begin()+i);
1278 // If we found some loop invariants, fold them into the recurrence.
1279 if (!LIOps.empty()) {
1280 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
1281 LIOps.push_back(AddRec->getStart());
1283 SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
1285 AddRecOps[0] = getAddExpr(LIOps);
1287 const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1288 // If all of the other operands were loop invariant, we are done.
1289 if (Ops.size() == 1) return NewRec;
// Otherwise, add the folded AddRec to the non-loop-invariant parts.
1292 for (unsigned i = 0;; ++i)
if (Ops[i] == AddRec) {
  Ops[i] = NewRec;
  break;
}
1297 return getAddExpr(Ops);
1300 // Okay, if there weren't any loop invariants to be folded, check to see if
1301 // there are multiple AddRec's with the same loop induction variable being
1302 // added together. If so, we can fold them.
1303 for (unsigned OtherIdx = Idx+1;
1304 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1305 if (OtherIdx != Idx) {
1306 const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1307 if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1308 // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
1309 SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1311 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1312 if (i >= NewOps.size()) {
1313 NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1314 OtherAddRec->op_end());
1317 NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1319 const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1321 if (Ops.size() == 2) return NewAddRec;
1323 Ops.erase(Ops.begin()+Idx);
1324 Ops.erase(Ops.begin()+OtherIdx-1);
1325 Ops.push_back(NewAddRec);
1326 return getAddExpr(Ops);
// Otherwise couldn't fold anything into this recurrence. Move on to the
// next one.
1334 // Okay, it looks like we really DO need an add expr. Check to see if we
1335 // already have one, otherwise create a new one.
1336 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1337 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scAddExpr,
1339 if (Result == 0) Result = new SCEVAddExpr(Ops);
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
1346 const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
1347 assert(!Ops.empty() && "Cannot get empty mul!");
1349 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1350 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1351 getEffectiveSCEVType(Ops[0]->getType()) &&
1352 "SCEVMulExpr operand types don't match!");
1355 // Sort by complexity, this groups all similar expression types together.
1356 GroupByComplexity(Ops, LI);
1358 // If there are any constants, fold them together.
1360 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1362 // C1*(C2+V) -> C1*C2 + C1*V
1363 if (Ops.size() == 2)
1364 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1365 if (Add->getNumOperands() == 2 &&
1366 isa<SCEVConstant>(Add->getOperand(0)))
1367 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1368 getMulExpr(LHSC, Add->getOperand(1)));
1372 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1373 // We found two constants, fold them together!
1374 ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
1375 RHSC->getValue()->getValue());
1376 Ops[0] = getConstant(Fold);
1377 Ops.erase(Ops.begin()+1); // Erase the folded element
1378 if (Ops.size() == 1) return Ops[0];
1379 LHSC = cast<SCEVConstant>(Ops[0]);
1382 // If we are left with a constant one being multiplied, strip it off.
1383 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1384 Ops.erase(Ops.begin());
1386 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1387 // If we have a multiply of zero, it will always be zero.
1392 // Skip over the add expression until we get to a multiply.
1393 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1396 if (Ops.size() == 1)
1399 // If there are mul operands inline them all into this expression.
1400 if (Idx < Ops.size()) {
1401 bool DeletedMul = false;
1402 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
// If we have a mul, expand the mul operands onto the end of the operands
// list.
1405 Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1406 Ops.erase(Ops.begin()+Idx);
1410 // If we deleted at least one mul, we added operands to the end of the list,
1411 // and they are not necessarily sorted. Recurse to resort and resimplify
// any operands we just acquired.
1414 return getMulExpr(Ops);
1417 // If there are any add recurrences in the operands list, see if any other
// added values are loop invariant. If so, we can fold them into the
// recurrence.
1420 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1423 // Scan over all recurrences, trying to fold loop invariants into them.
1424 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1425 // Scan all of the other operands to this mul and add them to the vector if
1426 // they are loop invariant w.r.t. the recurrence.
1427 SmallVector<const SCEV*, 8> LIOps;
1428 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1429 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1430 if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1431 LIOps.push_back(Ops[i]);
1432 Ops.erase(Ops.begin()+i);
1436 // If we found some loop invariants, fold them into the recurrence.
1437 if (!LIOps.empty()) {
1438 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
1439 SmallVector<const SCEV*, 4> NewOps;
1440 NewOps.reserve(AddRec->getNumOperands());
1441 if (LIOps.size() == 1) {
1442 const SCEV *Scale = LIOps[0];
1443 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1444 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1446 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1447 SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
1448 MulOps.push_back(AddRec->getOperand(i));
1449 NewOps.push_back(getMulExpr(MulOps));
1453 const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1455 // If all of the other operands were loop invariant, we are done.
1456 if (Ops.size() == 1) return NewRec;
1458 // Otherwise, multiply the folded AddRec by the non-liv parts.
1459 for (unsigned i = 0;; ++i)
if (Ops[i] == AddRec) {
  Ops[i] = NewRec;
  break;
}
1464 return getMulExpr(Ops);
1467 // Okay, if there weren't any loop invariants to be folded, check to see if
1468 // there are multiple AddRec's with the same loop induction variable being
1469 // multiplied together. If so, we can fold them.
1470 for (unsigned OtherIdx = Idx+1;
1471 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1472 if (OtherIdx != Idx) {
1473 const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1474 if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1475 // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
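// For instance (illustrative), {0,+,1} * {0,+,1} (i.e. i*i) folds to
// {0,+,1,+,2}: the new step is {0,+,1}*1 + {0,+,1}*1 + 1*1 = {1,+,2}, the
// odd numbers 1, 3, 5, ... whose running sum yields the perfect squares.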
1476 const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1477 const SCEV* NewStart = getMulExpr(F->getStart(),
1479 const SCEV* B = F->getStepRecurrence(*this);
1480 const SCEV* D = G->getStepRecurrence(*this);
1481 const SCEV* NewStep = getAddExpr(getMulExpr(F, D),
1484 const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep,
1486 if (Ops.size() == 2) return NewAddRec;
1488 Ops.erase(Ops.begin()+Idx);
1489 Ops.erase(Ops.begin()+OtherIdx-1);
1490 Ops.push_back(NewAddRec);
1491 return getMulExpr(Ops);
// Otherwise couldn't fold anything into this recurrence. Move on to the
// next one.
1499 // Okay, it looks like we really DO need an mul expr. Check to see if we
1500 // already have one, otherwise create a new one.
1501 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1502 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scMulExpr,
1505 Result = new SCEVMulExpr(Ops);
/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
1511 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1513 assert(getEffectiveSCEVType(LHS->getType()) ==
1514 getEffectiveSCEVType(RHS->getType()) &&
1515 "SCEVUDivExpr operand types don't match!");
1517 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1518 if (RHSC->getValue()->equalsInt(1))
1519 return LHS; // X udiv 1 --> x
if (RHSC->isZero())
  return getIntegerSCEV(0, LHS->getType()); // value is undefined
1523 // Determine if the division can be folded into the operands of
1525 // TODO: Generalize this to non-constants by using known-bits information.
1526 const Type *Ty = LHS->getType();
1527 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1528 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1529 // For non-power-of-two values, effectively round the value up to the
1530 // nearest power of two.
if (!RHSC->getValue()->getValue().isPowerOf2())
  ++MaxShiftAmt;
1533 const IntegerType *ExtTy =
1534 IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1535 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
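// For example (illustrative), {0,+,8}<loop> udiv 4 can be rewritten as
// {0,+,2}<loop>, provided the zero-extension check below shows that the
// original recurrence never wraps.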
1536 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1537 if (const SCEVConstant *Step =
1538 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1539 if (!Step->getValue()->getValue()
1540 .urem(RHSC->getValue()->getValue()) &&
1541 getZeroExtendExpr(AR, ExtTy) ==
1542 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1543 getZeroExtendExpr(Step, ExtTy),
1545 SmallVector<const SCEV*, 4> Operands;
1546 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1547 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1548 return getAddRecExpr(Operands, AR->getLoop());
1550 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1551 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1552 SmallVector<const SCEV*, 4> Operands;
1553 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1554 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1555 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1556 // Find an operand that's safely divisible.
1557 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1558 const SCEV* Op = M->getOperand(i);
1559 const SCEV* Div = getUDivExpr(Op, RHSC);
1560 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1561 const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands();
1562 Operands = SmallVector<const SCEV*, 4>(MOperands.begin(),
1565 return getMulExpr(Operands);
1569 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1570 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
1571 SmallVector<const SCEV*, 4> Operands;
1572 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1573 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1574 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1575 Operands.clear();
1576 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1577 const SCEV* Op = getUDivExpr(A->getOperand(i), RHS);
1578 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1579 break;
1580 Operands.push_back(Op);
1582 if (Operands.size() == A->getNumOperands())
1583 return getAddExpr(Operands);
1587 // Fold if both operands are constant.
1588 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1589 Constant *LHSCV = LHSC->getValue();
1590 Constant *RHSCV = RHSC->getValue();
1591 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1592 RHSCV)));
1596 SCEVUDivExpr *&Result = SCEVUDivs[std::make_pair(LHS, RHS)];
1597 if (Result == 0) Result = new SCEVUDivExpr(LHS, RHS);
1602 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1603 /// Simplify the expression as much as possible.
1604 const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start,
1605 const SCEV* Step, const Loop *L) {
1606 SmallVector<const SCEV*, 4> Operands;
1607 Operands.push_back(Start);
1608 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1609 if (StepChrec->getLoop() == L) {
1610 Operands.insert(Operands.end(), StepChrec->op_begin(),
1611 StepChrec->op_end());
1612 return getAddRecExpr(Operands, L);
1615 Operands.push_back(Step);
1616 return getAddRecExpr(Operands, L);
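// For example, getAddRecExpr(X, {1,+,2}<L>, L) folds the step's operands into
// the list and produces the single recurrence {X,+,1,+,2}<L>.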
1619 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1620 /// Simplify the expression as much as possible.
1622 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV*> &Operands,
1624 if (Operands.size() == 1) return Operands[0];
1626 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1627 assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1628 getEffectiveSCEVType(Operands[0]->getType()) &&
1629 "SCEVAddRecExpr operand types don't match!");
1632 if (Operands.back()->isZero()) {
1633 Operands.pop_back();
1634 return getAddRecExpr(Operands, L); // {X,+,0} --> X
1637 // Canonicalize nested AddRecs by nesting them in order of loop depth.
1638 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1639 const Loop* NestedLoop = NestedAR->getLoop();
1640 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1641 SmallVector<const SCEV*, 4> NestedOperands(NestedAR->op_begin(),
1642 NestedAR->op_end());
1643 Operands[0] = NestedAR->getStart();
1644 NestedOperands[0] = getAddRecExpr(Operands, L);
1645 return getAddRecExpr(NestedOperands, NestedLoop);
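// For example, {{X,+,Y}<Inner>,+,Z}<Outer>, whose start is a recurrence for a
// deeper loop, is rewritten as {{X,+,Z}<Outer>,+,Y}<Inner>, so the shallower
// loop's recurrence becomes the start of the deeper loop's recurrence.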
1649 std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end());
1650 SCEVAddRecExpr *&Result = SCEVAddRecExprs[std::make_pair(L, SCEVOps)];
1651 if (Result == 0) Result = new SCEVAddRecExpr(Operands, L);
1655 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1657 SmallVector<const SCEV*, 2> Ops;
1660 return getSMaxExpr(Ops);
1664 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
1665 assert(!Ops.empty() && "Cannot get empty smax!");
1666 if (Ops.size() == 1) return Ops[0];
1668 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1669 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1670 getEffectiveSCEVType(Ops[0]->getType()) &&
1671 "SCEVSMaxExpr operand types don't match!");
1674 // Sort by complexity, this groups all similar expression types together.
1675 GroupByComplexity(Ops, LI);
1677 // If there are any constants, fold them together.
1679 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1681 assert(Idx < Ops.size());
1682 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1683 // We found two constants, fold them together!
1684 ConstantInt *Fold = ConstantInt::get(
1685 APIntOps::smax(LHSC->getValue()->getValue(),
1686 RHSC->getValue()->getValue()));
1687 Ops[0] = getConstant(Fold);
1688 Ops.erase(Ops.begin()+1); // Erase the folded element
1689 if (Ops.size() == 1) return Ops[0];
1690 LHSC = cast<SCEVConstant>(Ops[0]);
1693 // If we are left with a constant minimum-int, strip it off.
1694 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1695 Ops.erase(Ops.begin());
1697 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1698 // If we have an smax with a constant maximum-int, it will always be
1699 // the maximum.
1704 if (Ops.size() == 1) return Ops[0];
1706 // Find the first SMax
1707 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1710 // Check to see if one of the operands is an SMax. If so, expand its operands
1711 // onto our operand list, and recurse to simplify.
1712 if (Idx < Ops.size()) {
1713 bool DeletedSMax = false;
1714 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1715 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1716 Ops.erase(Ops.begin()+Idx);
1721 return getSMaxExpr(Ops);
1724 // Okay, check to see if the same value occurs in the operand list twice. If
1725 // so, delete one. Since we sorted the list, these values are required to
1726 // be adjacent.
1727 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1728 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
1729 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1733 if (Ops.size() == 1) return Ops[0];
1735 assert(!Ops.empty() && "Reduced smax down to nothing!");
1737 // Okay, it looks like we really DO need an smax expr. Check to see if we
1738 // already have one, otherwise create a new one.
1739 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1740 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scSMaxExpr,
1742 if (Result == 0) Result = new SCEVSMaxExpr(Ops);
1746 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1748 SmallVector<const SCEV*, 2> Ops;
1751 return getUMaxExpr(Ops);
1755 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
1756 assert(!Ops.empty() && "Cannot get empty umax!");
1757 if (Ops.size() == 1) return Ops[0];
1759 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1760 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1761 getEffectiveSCEVType(Ops[0]->getType()) &&
1762 "SCEVUMaxExpr operand types don't match!");
1765 // Sort by complexity, this groups all similar expression types together.
1766 GroupByComplexity(Ops, LI);
1768 // If there are any constants, fold them together.
1770 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1772 assert(Idx < Ops.size());
1773 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1774 // We found two constants, fold them together!
1775 ConstantInt *Fold = ConstantInt::get(
1776 APIntOps::umax(LHSC->getValue()->getValue(),
1777 RHSC->getValue()->getValue()));
1778 Ops[0] = getConstant(Fold);
1779 Ops.erase(Ops.begin()+1); // Erase the folded element
1780 if (Ops.size() == 1) return Ops[0];
1781 LHSC = cast<SCEVConstant>(Ops[0]);
1784 // If we are left with a constant minimum-int, strip it off.
1785 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1786 Ops.erase(Ops.begin());
1788 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1789 // If we have an umax with a constant maximum-int, it will always be
1790 // the maximum.
1795 if (Ops.size() == 1) return Ops[0];
1797 // Find the first UMax
1798 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1801 // Check to see if one of the operands is a UMax. If so, expand its operands
1802 // onto our operand list, and recurse to simplify.
1803 if (Idx < Ops.size()) {
1804 bool DeletedUMax = false;
1805 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1806 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1807 Ops.erase(Ops.begin()+Idx);
1812 return getUMaxExpr(Ops);
1815 // Okay, check to see if the same value occurs in the operand list twice. If
1816 // so, delete one. Since we sorted the list, these values are required to
1817 // be adjacent.
1818 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1819 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
1820 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1824 if (Ops.size() == 1) return Ops[0];
1826 assert(!Ops.empty() && "Reduced umax down to nothing!");
1828 // Okay, it looks like we really DO need a umax expr. Check to see if we
1829 // already have one, otherwise create a new one.
1830 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1831 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scUMaxExpr,
1833 if (Result == 0) Result = new SCEVUMaxExpr(Ops);
1837 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1839 // ~smax(~x, ~y) == smin(x, y).
1840 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1843 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1845 // ~umax(~x, ~y) == umin(x, y)
1846 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
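// Both identities hold because ~x = -1 - x reverses the (signed or unsigned)
// order: the largest complement corresponds to the smallest original value,
// so complementing the max of the complements yields the min.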
1849 const SCEV* ScalarEvolution::getUnknown(Value *V) {
1850 // Don't attempt to do anything other than create a SCEVUnknown object
1851 // here. createSCEV only calls getUnknown after checking for all other
1852 // interesting possibilities, and any other code that calls getUnknown
1853 // is doing so in order to hide a value from SCEV canonicalization.
1855 SCEVUnknown *&Result = SCEVUnknowns[V];
1856 if (Result == 0) Result = new SCEVUnknown(V);
1860 //===----------------------------------------------------------------------===//
1861 // Basic SCEV Analysis and PHI Idiom Recognition Code
1864 /// isSCEVable - Test if values of the given type are analyzable within
1865 /// the SCEV framework. This primarily includes integer types, and it
1866 /// can optionally include pointer types if the ScalarEvolution class
1867 /// has access to target-specific information.
1868 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
1869 // Integers are always SCEVable.
1870 if (Ty->isInteger())
1871 return true;
1873 // Pointers are SCEVable if TargetData information is available
1874 // to provide pointer size information.
1875 if (isa<PointerType>(Ty))
1876 return TD != NULL;
1878 // Otherwise it's not SCEVable.
1879 return false;
1882 /// getTypeSizeInBits - Return the size in bits of the specified type,
1883 /// for which isSCEVable must return true.
1884 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
1885 assert(isSCEVable(Ty) && "Type is not SCEVable!");
1887 // If we have a TargetData, use it!
1888 if (TD)
1889 return TD->getTypeSizeInBits(Ty);
1891 // Otherwise, we support only integer types.
1892 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
1893 return Ty->getPrimitiveSizeInBits();
1896 /// getEffectiveSCEVType - Return a type with the same bitwidth as
1897 /// the given type and which represents how SCEV will treat the given
1898 /// type, for which isSCEVable must return true. For pointer types,
1899 /// this is the pointer-sized integer type.
1900 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
1901 assert(isSCEVable(Ty) && "Type is not SCEVable!");
1903 if (Ty->isInteger())
1904 return Ty;
1906 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
1907 return TD->getIntPtrType();
1910 const SCEV* ScalarEvolution::getCouldNotCompute() {
1911 return CouldNotCompute;
1914 /// hasSCEV - Return true if the SCEV for this value has already been
1916 bool ScalarEvolution::hasSCEV(Value *V) const {
1917 return Scalars.count(V);
1920 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
1921 /// expression and create a new one.
1922 const SCEV* ScalarEvolution::getSCEV(Value *V) {
1923 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
1925 std::map<SCEVCallbackVH, const SCEV*>::iterator I = Scalars.find(V);
1926 if (I != Scalars.end()) return I->second;
1927 const SCEV* S = createSCEV(V);
1928 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
1929 return S;
1932 /// getIntegerSCEV - Given a SCEVable type, create a constant for the
1933 /// specified signed integer value and return a SCEV for the constant.
1934 const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
1935 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
1936 return getConstant(ConstantInt::get(ITy, Val));
1939 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
1941 const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) {
1942 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
1943 return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
1945 const Type *Ty = V->getType();
1946 Ty = getEffectiveSCEVType(Ty);
1947 return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
1950 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
1951 const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) {
1952 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
1953 return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
1955 const Type *Ty = V->getType();
1956 Ty = getEffectiveSCEVType(Ty);
1957 const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
1958 return getMinusSCEV(AllOnes, V);
1961 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
1963 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
1966 return getAddExpr(LHS, getNegativeSCEV(RHS));
1969 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
1970 /// input value to the specified type. If the type must be extended, it is zero
1973 ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V,
1975 const Type *SrcTy = V->getType();
1976 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
1977 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
1978 "Cannot truncate or zero extend with non-integer arguments!");
1979 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
1980 return V; // No conversion
1981 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
1982 return getTruncateExpr(V, Ty);
1983 return getZeroExtendExpr(V, Ty);
1986 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
1987 /// input value to the specified type. If the type must be extended, it is sign
1990 ScalarEvolution::getTruncateOrSignExtend(const SCEV* V,
1992 const Type *SrcTy = V->getType();
1993 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
1994 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
1995 "Cannot truncate or zero extend with non-integer arguments!");
1996 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
1997 return V; // No conversion
1998 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
1999 return getTruncateExpr(V, Ty);
2000 return getSignExtendExpr(V, Ty);
2003 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2004 /// input value to the specified type. If the type must be extended, it is zero
2005 /// extended. The conversion must not be narrowing.
2007 ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) {
2008 const Type *SrcTy = V->getType();
2009 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2010 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2011 "Cannot noop or zero extend with non-integer arguments!");
2012 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2013 "getNoopOrZeroExtend cannot truncate!");
2014 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2015 return V; // No conversion
2016 return getZeroExtendExpr(V, Ty);
2019 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2020 /// input value to the specified type. If the type must be extended, it is sign
2021 /// extended. The conversion must not be narrowing.
2023 ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) {
2024 const Type *SrcTy = V->getType();
2025 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2026 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2027 "Cannot noop or sign extend with non-integer arguments!");
2028 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2029 "getNoopOrSignExtend cannot truncate!");
2030 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2031 return V; // No conversion
2032 return getSignExtendExpr(V, Ty);
2035 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2036 /// the input value to the specified type. If the type must be extended,
2037 /// it is extended with unspecified bits. The conversion must not be
2040 ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) {
2041 const Type *SrcTy = V->getType();
2042 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2043 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2044 "Cannot noop or any extend with non-integer arguments!");
2045 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2046 "getNoopOrAnyExtend cannot truncate!");
2047 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2048 return V; // No conversion
2049 return getAnyExtendExpr(V, Ty);
2052 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2053 /// input value to the specified type. The conversion must not be widening.
2055 ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) {
2056 const Type *SrcTy = V->getType();
2057 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2058 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2059 "Cannot truncate or noop with non-integer arguments!");
2060 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2061 "getTruncateOrNoop cannot extend!");
2062 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2063 return V; // No conversion
2064 return getTruncateExpr(V, Ty);
2067 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2068 /// the types using zero-extension, and then perform a umax operation
2070 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2072 const SCEV* PromotedLHS = LHS;
2073 const SCEV* PromotedRHS = RHS;
2075 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2076 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2077 else
2078 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2080 return getUMaxExpr(PromotedLHS, PromotedRHS);
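// For example, taking the umax of an i8 value and an i32 value first
// zero-extends the i8 operand to i32 and then forms the umax in i32.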
2083 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2084 /// the types using zero-extension, and then perform a umin operation
2086 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2088 const SCEV* PromotedLHS = LHS;
2089 const SCEV* PromotedRHS = RHS;
2091 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2092 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2093 else
2094 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2096 return getUMinExpr(PromotedLHS, PromotedRHS);
2099 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2100 /// the specified instruction and replaces any references to the symbolic value
2101 /// SymName with the specified value. This is used during PHI resolution.
2103 ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2104 const SCEV *SymName,
2105 const SCEV *NewVal) {
2106 std::map<SCEVCallbackVH, const SCEV*>::iterator SI =
2107 Scalars.find(SCEVCallbackVH(I, this));
2108 if (SI == Scalars.end()) return;
2110 const SCEV* NV =
2111 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2112 if (NV == SI->second) return; // No change.
2114 SI->second = NV; // Update the scalars map!
2116 // Any instruction values that use this instruction might also need to be
2118 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2119 UI != E; ++UI)
2120 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2123 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2124 /// a loop header, making it a potential recurrence, or it doesn't.
2126 const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) {
2127 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
2128 if (const Loop *L = LI->getLoopFor(PN->getParent()))
2129 if (L->getHeader() == PN->getParent()) {
2130 // If it lives in the loop header, it has two incoming values, one
2131 // from outside the loop, and one from inside.
2132 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2133 unsigned BackEdge = IncomingEdge^1;
2135 // While we are analyzing this PHI node, handle its value symbolically.
2136 const SCEV* SymbolicName = getUnknown(PN);
2137 assert(Scalars.find(PN) == Scalars.end() &&
2138 "PHI node already processed?");
2139 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2141 // Using this symbolic name for the PHI, analyze the value coming around
2143 const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2145 // NOTE: If BEValue is loop invariant, we know that the PHI node just
2146 // has a special value for the first iteration of the loop.
2148 // If the value coming around the backedge is an add with the symbolic
2149 // value we just inserted, then we found a simple induction variable!
2150 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2151 // If there is a single occurrence of the symbolic value, replace it
2152 // with a recurrence.
2153 unsigned FoundIndex = Add->getNumOperands();
2154 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2155 if (Add->getOperand(i) == SymbolicName)
2156 if (FoundIndex == e) {
2157 FoundIndex = i;
2158 break;
2159 }
2161 if (FoundIndex != Add->getNumOperands()) {
2162 // Create an add with everything but the specified operand.
2163 SmallVector<const SCEV*, 8> Ops;
2164 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2165 if (i != FoundIndex)
2166 Ops.push_back(Add->getOperand(i));
2167 const SCEV* Accum = getAddExpr(Ops);
2169 // This is not a valid addrec if the step amount is varying each
2170 // loop iteration, but is not itself an addrec in this loop.
2171 if (Accum->isLoopInvariant(L) ||
2172 (isa<SCEVAddRecExpr>(Accum) &&
2173 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2174 const SCEV *StartVal =
2175 getSCEV(PN->getIncomingValue(IncomingEdge));
2176 const SCEV *PHISCEV =
2177 getAddRecExpr(StartVal, Accum, L);
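// As an illustration, for a PHI like
//   %i = phi i32 [ 0, %entry ], [ %i.next, %backedge ]
//   %i.next = add i32 %i, 1
// the backedge value is SymbolicName + 1, so Accum is 1, StartVal is 0, and
// the PHI is represented by the recurrence {0,+,1}.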
2179 // Okay, for the entire analysis of this edge we assumed the PHI
2180 // to be symbolic. We now need to go back and update all of the
2181 // entries for the scalars that use the PHI (except for the PHI
2182 // itself) to use the new analyzed value instead of the "symbolic"
2184 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2185 return PHISCEV;
2188 } else if (const SCEVAddRecExpr *AddRec =
2189 dyn_cast<SCEVAddRecExpr>(BEValue)) {
2190 // Otherwise, this could be a loop like this:
2191 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
2192 // In this case, j = {1,+,1} and BEValue is j.
2193 // Because the other in-value of i (0) fits the evolution of BEValue
2194 // i really is an addrec evolution.
2195 if (AddRec->getLoop() == L && AddRec->isAffine()) {
2196 const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2198 // If StartVal = j.start - j.stride, we can use StartVal as the
2199 // initial step of the addrec evolution.
2200 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2201 AddRec->getOperand(1))) {
2202 const SCEV* PHISCEV =
2203 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2205 // Okay, for the entire analysis of this edge we assumed the PHI
2206 // to be symbolic. We now need to go back and update all of the
2207 // entries for the scalars that use the PHI (except for the PHI
2208 // itself) to use the new analyzed value instead of the "symbolic"
2210 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2211 return PHISCEV;
2216 return SymbolicName;
2219 // If it's not a loop phi, we can't handle it yet.
2220 return getUnknown(PN);
2223 /// createNodeForGEP - Expand GEP instructions into add and multiply
2224 /// operations. This allows them to be analyzed by regular SCEV code.
2226 const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {
2228 const Type *IntPtrTy = TD->getIntPtrType();
2229 Value *Base = GEP->getOperand(0);
2230 // Don't attempt to analyze GEPs over unsized objects.
2231 if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2232 return getUnknown(GEP);
2233 const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy);
2234 gep_type_iterator GTI = gep_type_begin(GEP);
2235 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2236 E = GEP->op_end();
2237 I != E; ++I) {
2238 Value *Index = *I;
2239 // Compute the (potentially symbolic) offset in bytes for this index.
2240 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2241 // For a struct, add the member offset.
2242 const StructLayout &SL = *TD->getStructLayout(STy);
2243 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2244 uint64_t Offset = SL.getElementOffset(FieldNo);
2245 TotalOffset = getAddExpr(TotalOffset,
2246 getIntegerSCEV(Offset, IntPtrTy));
2247 } else {
2248 // For an array, add the element offset, explicitly scaled.
2249 const SCEV* LocalOffset = getSCEV(Index);
2250 if (!isa<PointerType>(LocalOffset->getType()))
2251 // Getelementptr indices are signed.
2252 LocalOffset = getTruncateOrSignExtend(LocalOffset,
2253 IntPtrTy);
2254 LocalOffset =
2255 getMulExpr(LocalOffset,
2256 getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2257 IntPtrTy));
2258 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2261 return getAddExpr(getSCEV(Base), TotalOffset);
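// For example, assuming TargetData reports a 4-byte allocation size for i32,
// a "getelementptr i32* %A, i64 %i" becomes getSCEV(%A) + 4 * getSCEV(%i)
// (with %i sign-extended or truncated to the pointer-sized type as needed).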
2264 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2265 /// guaranteed to end in (at every loop iteration). It is, at the same time,
2266 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
2267 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
2269 ScalarEvolution::GetMinTrailingZeros(const SCEV* S) {
2270 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2271 return C->getValue()->getValue().countTrailingZeros();
2273 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2274 return std::min(GetMinTrailingZeros(T->getOperand()),
2275 (uint32_t)getTypeSizeInBits(T->getType()));
2277 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2278 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2279 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2280 getTypeSizeInBits(E->getType()) : OpRes;
2283 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2284 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2285 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2286 getTypeSizeInBits(E->getType()) : OpRes;
2289 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2290 // The result is the min of all operands results.
2291 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2292 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2293 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2297 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2298 // The result is the sum of all operands results.
2299 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2300 uint32_t BitWidth = getTypeSizeInBits(M->getType());
2301 for (unsigned i = 1, e = M->getNumOperands();
2302 SumOpRes != BitWidth && i != e; ++i)
2303 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2308 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2309 // The result is the min of all operands results.
2310 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2311 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2312 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2316 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2317 // The result is the min of all operands results.
2318 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2319 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2320 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2324 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2325 // The result is the min of all operands results.
2326 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2327 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2328 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2332 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2333 // For a SCEVUnknown, ask ValueTracking.
2334 unsigned BitWidth = getTypeSizeInBits(U->getType());
2335 APInt Mask = APInt::getAllOnesValue(BitWidth);
2336 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2337 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
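// For instance, if ValueTracking proves the low three bits of the value are
// always zero, Zeros has its three low bits set and countTrailingOnes()
// below reports a minimum of three trailing zero bits.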
2338 return Zeros.countTrailingOnes();
2346 ScalarEvolution::GetMinLeadingZeros(const SCEV* S) {
2347 // TODO: Handle other SCEV expression types here.
2349 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2350 return C->getValue()->getValue().countLeadingZeros();
2352 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
2353 // A zero-extension cast adds zero bits.
2354 return GetMinLeadingZeros(C->getOperand()) +
2355 (getTypeSizeInBits(C->getType()) -
2356 getTypeSizeInBits(C->getOperand()->getType()));
2359 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2360 // For a SCEVUnknown, ask ValueTracking.
2361 unsigned BitWidth = getTypeSizeInBits(U->getType());
2362 APInt Mask = APInt::getAllOnesValue(BitWidth);
2363 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2364 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2365 return Zeros.countLeadingOnes();
2372 ScalarEvolution::GetMinSignBits(const SCEV* S) {
2373 // TODO: Handle other SCEV expression types here.
2375 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
2376 const APInt &A = C->getValue()->getValue();
2377 return A.isNegative() ? A.countLeadingOnes() :
2378 A.countLeadingZeros();
2381 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
2382 // A sign-extension cast adds sign bits.
2383 return GetMinSignBits(C->getOperand()) +
2384 (getTypeSizeInBits(C->getType()) -
2385 getTypeSizeInBits(C->getOperand()->getType()));
2388 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2389 unsigned BitWidth = getTypeSizeInBits(A->getType());
2391 // Special case decrementing a value (ADD X, -1):
2392 if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0)))
2393 if (CRHS->isAllOnesValue()) {
2394 SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end());
2395 const SCEV *OtherOpsAdd = getAddExpr(OtherOps);
2396 unsigned LZ = GetMinLeadingZeros(OtherOpsAdd);
2398 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2400 if (LZ == BitWidth - 1)
2401 return BitWidth;
2403 // If we are subtracting one from a positive number, there is no carry
2404 // out of the result.
2405 if (LZ > 0)
2406 return GetMinSignBits(OtherOpsAdd);
2409 // Add can have at most one carry bit. Thus we know that the output
2410 // is, at worst, one more bit than the inputs.
2411 unsigned Min = BitWidth;
2412 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2413 unsigned N = GetMinSignBits(A->getOperand(i));
2414 Min = std::min(Min, N) - 1;
2415 if (Min == 0) return 1;
2420 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2421 // For a SCEVUnknown, ask ValueTracking.
2422 return ComputeNumSignBits(U->getValue(), TD);
2428 /// createSCEV - We know that there is no SCEV for the specified value.
2429 /// Analyze the expression.
2431 const SCEV* ScalarEvolution::createSCEV(Value *V) {
2432 if (!isSCEVable(V->getType()))
2433 return getUnknown(V);
2435 unsigned Opcode = Instruction::UserOp1;
2436 if (Instruction *I = dyn_cast<Instruction>(V))
2437 Opcode = I->getOpcode();
2438 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2439 Opcode = CE->getOpcode();
2440 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2441 return getConstant(CI);
2442 else if (isa<ConstantPointerNull>(V))
2443 return getIntegerSCEV(0, V->getType());
2444 else if (isa<UndefValue>(V))
2445 return getIntegerSCEV(0, V->getType());
2447 return getUnknown(V);
2449 User *U = cast<User>(V);
2451 case Instruction::Add:
2452 return getAddExpr(getSCEV(U->getOperand(0)),
2453 getSCEV(U->getOperand(1)));
2454 case Instruction::Mul:
2455 return getMulExpr(getSCEV(U->getOperand(0)),
2456 getSCEV(U->getOperand(1)));
2457 case Instruction::UDiv:
2458 return getUDivExpr(getSCEV(U->getOperand(0)),
2459 getSCEV(U->getOperand(1)));
2460 case Instruction::Sub:
2461 return getMinusSCEV(getSCEV(U->getOperand(0)),
2462 getSCEV(U->getOperand(1)));
2463 case Instruction::And:
2464 // For an expression like x&255 that merely masks off the high bits,
2465 // use zext(trunc(x)) as the SCEV expression.
2466 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2467 if (CI->isNullValue())
2468 return getSCEV(U->getOperand(1));
2469 if (CI->isAllOnesValue())
2470 return getSCEV(U->getOperand(0));
2471 const APInt &A = CI->getValue();
2473 // Instcombine's ShrinkDemandedConstant may strip bits out of
2474 // constants, obscuring what would otherwise be a low-bits mask.
2475 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2476 // knew about to reconstruct a low-bits mask value.
2477 unsigned LZ = A.countLeadingZeros();
2478 unsigned BitWidth = A.getBitWidth();
2479 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2480 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2481 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2483 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2485 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2486 return
2487 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2488 IntegerType::get(BitWidth - LZ)),
2489 U->getType());
2493 case Instruction::Or:
2494 // If the RHS of the Or is a constant, we may have something like:
2495 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
2496 // optimizations will transparently handle this case.
2498 // In order for this transformation to be safe, the LHS must be of the
2499 // form X*(2^n) and the Or constant must be less than 2^n.
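// For example, X*4 | 1 qualifies: the multiply guarantees at least two
// trailing zero bits and the constant 1 only occupies the low bit, so the
// expression is handled as X*4 + 1.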
2500 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2501 const SCEV* LHS = getSCEV(U->getOperand(0));
2502 const APInt &CIVal = CI->getValue();
2503 if (GetMinTrailingZeros(LHS) >=
2504 (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2505 return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2508 case Instruction::Xor:
2509 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2510 // If the RHS of the xor is a signbit, then this is just an add.
2511 // Instcombine turns add of signbit into xor as a strength reduction step.
2512 if (CI->getValue().isSignBit())
2513 return getAddExpr(getSCEV(U->getOperand(0)),
2514 getSCEV(U->getOperand(1)));
2516 // If the RHS of xor is -1, then this is a not operation.
2517 if (CI->isAllOnesValue())
2518 return getNotSCEV(getSCEV(U->getOperand(0)));
2520 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2521 // This is a variant of the check for xor with -1, and it handles
2522 // the case where instcombine has trimmed non-demanded bits out
2523 // of an xor with -1.
2524 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2525 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2526 if (BO->getOpcode() == Instruction::And &&
2527 LCI->getValue() == CI->getValue())
2528 if (const SCEVZeroExtendExpr *Z =
2529 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2530 const Type *UTy = U->getType();
2531 const SCEV* Z0 = Z->getOperand();
2532 const Type *Z0Ty = Z0->getType();
2533 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2535 // If C is a low-bits mask, the zero extend is serving to
2536 // mask off the high bits. Complement the operand and
2537 // re-apply the zext.
2538 if (APIntOps::isMask(Z0TySize, CI->getValue()))
2539 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2541 // If C is a single bit, it may be in the sign-bit position
2542 // before the zero-extend. In this case, represent the xor
2543 // using an add, which is equivalent, and re-apply the zext.
2544 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2545 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2546 Trunc.isSignBit())
2547 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2553 case Instruction::Shl:
2554 // Turn shift left of a constant amount into a multiply.
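// For example, "shl %x, 3" is modeled as %x * 8 (i.e. %x * (1 << 3)).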
2555 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2556 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2557 Constant *X = ConstantInt::get(
2558 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2559 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2563 case Instruction::LShr:
2564 // Turn logical shift right of a constant into an unsigned divide.
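// For example, "lshr %x, 3" is modeled as %x udiv 8.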
2565 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2566 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2567 Constant *X = ConstantInt::get(
2568 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2569 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2573 case Instruction::AShr:
2574 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
2575 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2576 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2577 if (L->getOpcode() == Instruction::Shl &&
2578 L->getOperand(1) == U->getOperand(1)) {
2579 unsigned BitWidth = getTypeSizeInBits(U->getType());
2580 uint64_t Amt = BitWidth - CI->getZExtValue();
2581 if (Amt == BitWidth)
2582 return getSCEV(L->getOperand(0)); // shift by zero --> noop
2583 if (Amt > BitWidth)
2584 return getIntegerSCEV(0, U->getType()); // value is undefined
2585 return
2586 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2587 IntegerType::get(Amt)),
2588 U->getType());
2592 case Instruction::Trunc:
2593 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2595 case Instruction::ZExt:
2596 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2598 case Instruction::SExt:
2599 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2601 case Instruction::BitCast:
2602 // BitCasts are no-op casts so we just eliminate the cast.
2603 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2604 return getSCEV(U->getOperand(0));
2607 case Instruction::IntToPtr:
2608 if (!TD) break; // Without TD we can't analyze pointers.
2609 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2610 TD->getIntPtrType());
2612 case Instruction::PtrToInt:
2613 if (!TD) break; // Without TD we can't analyze pointers.
2614 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2615 U->getType());
2617 case Instruction::GetElementPtr:
2618 if (!TD) break; // Without TD we can't analyze pointers.
2619 return createNodeForGEP(U);
2621 case Instruction::PHI:
2622 return createNodeForPHI(cast<PHINode>(U));
2624 case Instruction::Select:
2625 // This could be a smax or umax that was lowered earlier.
2626 // Try to recover it.
2627 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2628 Value *LHS = ICI->getOperand(0);
2629 Value *RHS = ICI->getOperand(1);
2630 switch (ICI->getPredicate()) {
2631 case ICmpInst::ICMP_SLT:
2632 case ICmpInst::ICMP_SLE:
2633 std::swap(LHS, RHS);
2634 // fall through
2635 case ICmpInst::ICMP_SGT:
2636 case ICmpInst::ICMP_SGE:
2637 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2638 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2639 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2640 return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2642 case ICmpInst::ICMP_ULT:
2643 case ICmpInst::ICMP_ULE:
2644 std::swap(LHS, RHS);
2645 // fall through
2646 case ICmpInst::ICMP_UGT:
2647 case ICmpInst::ICMP_UGE:
2648 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2649 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2650 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2651 return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2653 case ICmpInst::ICMP_NE:
2654 // n != 0 ? n : 1 -> umax(n, 1)
2655 if (LHS == U->getOperand(1) &&
2656 isa<ConstantInt>(U->getOperand(2)) &&
2657 cast<ConstantInt>(U->getOperand(2))->isOne() &&
2658 isa<ConstantInt>(RHS) &&
2659 cast<ConstantInt>(RHS)->isZero())
2660 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2662 case ICmpInst::ICMP_EQ:
2663 // n == 0 ? 1 : n -> umax(n, 1)
2664 if (LHS == U->getOperand(2) &&
2665 isa<ConstantInt>(U->getOperand(1)) &&
2666 cast<ConstantInt>(U->getOperand(1))->isOne() &&
2667 isa<ConstantInt>(RHS) &&
2668 cast<ConstantInt>(RHS)->isZero())
2669 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2676 default: // We cannot analyze this expression.
2680 return getUnknown(V);
2685 //===----------------------------------------------------------------------===//
2686 // Iteration Count Computation Code
2689 /// getBackedgeTakenCount - If the specified loop has a predictable
2690 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2691 /// object. The backedge-taken count is the number of times the loop header
2692 /// will be branched to from within the loop. This is one less than the
2693 /// trip count of the loop, since it doesn't count the first iteration,
2694 /// when the header is branched to from outside the loop.
2696 /// Note that it is not valid to call this method on a loop without a
2697 /// loop-invariant backedge-taken count (see
2698 /// hasLoopInvariantBackedgeTakenCount).
2700 const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2701 return getBackedgeTakenInfo(L).Exact;
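// For example, a loop whose header executes 10 times has a trip count of 10
// and a backedge-taken count of 9; the initial entry into the header comes
// from outside the loop and is not counted here.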
2704 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2705 /// return the least SCEV value that is known never to be less than the
2706 /// actual backedge taken count.
2707 const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2708 return getBackedgeTakenInfo(L).Max;
2711 const ScalarEvolution::BackedgeTakenInfo &
2712 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2713 // Initially insert a CouldNotCompute for this loop. If the insertion
2714 // succeeds, proceed to actually compute a backedge-taken count and
2715 // update the value. The temporary CouldNotCompute value tells SCEV
2716 // code elsewhere that it shouldn't attempt to request a new
2717 // backedge-taken count, which could result in infinite recursion.
2718 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2719 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2720 if (Pair.second) {
2721 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2722 if (ItCount.Exact != CouldNotCompute) {
2723 assert(ItCount.Exact->isLoopInvariant(L) &&
2724 ItCount.Max->isLoopInvariant(L) &&
2725 "Computed trip count isn't loop invariant for loop!");
2726 ++NumTripCountsComputed;
2728 // Update the value in the map.
2729 Pair.first->second = ItCount;
2730 } else {
2731 if (ItCount.Max != CouldNotCompute)
2732 // Update the value in the map.
2733 Pair.first->second = ItCount;
2734 if (isa<PHINode>(L->getHeader()->begin()))
2735 // Only count loops that have phi nodes as not being computable.
2736 ++NumTripCountsNotComputed;
2739 // Now that we know more about the trip count for this loop, forget any
2740 // existing SCEV values for PHI nodes in this loop since they are only
2741 // conservative estimates made without the benefit
2742 // of trip count information.
2743 if (ItCount.hasAnyInfo())
2744 forgetLoopPHIs(L);
2746 return Pair.first->second;
2749 /// forgetLoopBackedgeTakenCount - This method should be called by the
2750 /// client when it has changed a loop in a way that may affect
2751 /// ScalarEvolution's ability to compute a trip count, or if the loop
2752 /// is deleted.
2753 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2754 BackedgeTakenCounts.erase(L);
2755 forgetLoopPHIs(L);
2758 /// forgetLoopPHIs - Delete the memoized SCEVs associated with the
2759 /// PHI nodes in the given loop. This is used when the trip count of
2760 /// the loop may have changed.
2761 void ScalarEvolution::forgetLoopPHIs(const Loop *L) {
2762 BasicBlock *Header = L->getHeader();
2764 // Push all Loop-header PHIs onto the Worklist stack, except those
2765 // that are presently represented via a SCEVUnknown. SCEVUnknown for
2766 // a PHI either means that it has an unrecognized structure, or it's
2767 // a PHI that's in the process of being computed by createNodeForPHI.
2768 // In the former case, additional loop trip count information isn't
2769 // going to change anything. In the latter case, createNodeForPHI will
2770 // perform the necessary updates on its own when it gets to that point.
2771 SmallVector<Instruction *, 16> Worklist;
2772 for (BasicBlock::iterator I = Header->begin();
2773 PHINode *PN = dyn_cast<PHINode>(I); ++I) {
2774 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2775 Scalars.find((Value*)I);
2776 if (It != Scalars.end() && !isa<SCEVUnknown>(It->second))
2777 Worklist.push_back(PN);
2780 while (!Worklist.empty()) {
2781 Instruction *I = Worklist.pop_back_val();
2782 if (Scalars.erase(I))
2783 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2784 UI != UE; ++UI)
2785 Worklist.push_back(cast<Instruction>(UI));
2789 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
2790 /// of the specified loop will execute.
2791 ScalarEvolution::BackedgeTakenInfo
2792 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2793 SmallVector<BasicBlock*, 8> ExitingBlocks;
2794 L->getExitingBlocks(ExitingBlocks);
2796 // Examine all exits and pick the most conservative values.
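// For example, if one exiting block is known to exit after 7 backedges and
// another after 10, the counts are combined with umin below and the loop's
// backedge-taken count is 7.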
2797 const SCEV* BECount = CouldNotCompute;
2798 const SCEV* MaxBECount = CouldNotCompute;
2799 bool CouldNotComputeBECount = false;
2800 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2801 BackedgeTakenInfo NewBTI =
2802 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2804 if (NewBTI.Exact == CouldNotCompute) {
2805 // We couldn't compute an exact value for this exit, so
2806 // we won't be able to compute an exact value for the loop.
2807 CouldNotComputeBECount = true;
2808 BECount = CouldNotCompute;
2809 } else if (!CouldNotComputeBECount) {
2810 if (BECount == CouldNotCompute)
2811 BECount = NewBTI.Exact;
2813 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
2815 if (MaxBECount == CouldNotCompute)
2816 MaxBECount = NewBTI.Max;
2817 else if (NewBTI.Max != CouldNotCompute)
2818 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
2821 return BackedgeTakenInfo(BECount, MaxBECount);
2824 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
2825 /// of the specified loop will execute if it exits via the specified block.
2826 ScalarEvolution::BackedgeTakenInfo
2827 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
2828 BasicBlock *ExitingBlock) {
2830 // Okay, we've chosen an exiting block. See what condition causes us to
2831 // exit at this block.
2833 // FIXME: we should be able to handle switch instructions (with a single exit)
2834 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2835 if (ExitBr == 0) return CouldNotCompute;
2836 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
2838 // At this point, we know we have a conditional branch that determines whether
2839 // the loop is exited. However, we don't know if the branch is executed each
2840 // time through the loop. If not, then the execution count of the branch will
2841 // not be equal to the trip count of the loop.
2843 // Currently we check for this by checking to see if the Exit branch goes to
2844 // the loop header. If so, we know it will always execute the same number of
2845 // times as the loop. We also handle the case where the exit block *is* the
2846 // loop header. This is common for un-rotated loops.
2848 // If both of those tests fail, walk up the unique predecessor chain to the
2849 // header, stopping if there is an edge that doesn't exit the loop. If the
2850 // header is reached, the execution count of the branch will be equal to the
2851 // trip count of the loop.
2853 // More extensive analysis could be done to handle more cases here.
2855 if (ExitBr->getSuccessor(0) != L->getHeader() &&
2856 ExitBr->getSuccessor(1) != L->getHeader() &&
2857 ExitBr->getParent() != L->getHeader()) {
2858 // The simple checks failed, try climbing the unique predecessor chain
2859 // up to the header.
2861 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
2862 BasicBlock *Pred = BB->getUniquePredecessor();
2863 if (!Pred)
2864 return CouldNotCompute;
2865 TerminatorInst *PredTerm = Pred->getTerminator();
2866 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
2867 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
2868 if (PredSucc == BB)
2869 continue;
2870 // If the predecessor has a successor that isn't BB and isn't
2871 // outside the loop, assume the worst.
2872 if (L->contains(PredSucc))
2873 return CouldNotCompute;
2875 if (Pred == L->getHeader()) {
2882 return CouldNotCompute;
2885 // Proceed to the next level to examine the exit condition expression.
2886 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
2887 ExitBr->getSuccessor(0),
2888 ExitBr->getSuccessor(1));
2891 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
2892 /// backedge of the specified loop will execute if its exit condition
2893 /// were a conditional branch of ExitCond, TBB, and FBB.
2894 ScalarEvolution::BackedgeTakenInfo
2895 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
2899 // Check if the controlling expression for this loop is an And or Or.
2900 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
2901 if (BO->getOpcode() == Instruction::And) {
2902 // Recurse on the operands of the and.
2903 BackedgeTakenInfo BTI0 =
2904 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
2905 BackedgeTakenInfo BTI1 =
2906 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
2907 const SCEV* BECount = CouldNotCompute;
2908 const SCEV* MaxBECount = CouldNotCompute;
2909 if (L->contains(TBB)) {
2910 // Both conditions must be true for the loop to continue executing.
2911 // Choose the less conservative count.
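// For example, a loop controlled by "i < n && i < m" keeps running only
// while both comparisons hold, so its backedge-taken count is the umin of
// the counts computed for each condition separately.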
2912 if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
2913 BECount = CouldNotCompute;
2914 else
2915 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2916 if (BTI0.Max == CouldNotCompute)
2917 MaxBECount = BTI1.Max;
2918 else if (BTI1.Max == CouldNotCompute)
2919 MaxBECount = BTI0.Max;
2920 else
2921 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
2922 } else {
2923 // Both conditions must be true for the loop to exit.
2924 assert(L->contains(FBB) && "Loop block has no successor in loop!");
2925 if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
2926 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2927 if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
2928 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
2931 return BackedgeTakenInfo(BECount, MaxBECount);
2933 if (BO->getOpcode() == Instruction::Or) {
2934 // Recurse on the operands of the or.
2935 BackedgeTakenInfo BTI0 =
2936 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
2937 BackedgeTakenInfo BTI1 =
2938 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
2939 const SCEV* BECount = CouldNotCompute;
2940 const SCEV* MaxBECount = CouldNotCompute;
2941 if (L->contains(FBB)) {
2942 // Both conditions must be false for the loop to continue executing.
2943 // Choose the less conservative count.
2944 if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
2945 BECount = CouldNotCompute;
2946 else
2947 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2948 if (BTI0.Max == CouldNotCompute)
2949 MaxBECount = BTI1.Max;
2950 else if (BTI1.Max == CouldNotCompute)
2951 MaxBECount = BTI0.Max;
2952 else
2953 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
2954 } else {
2955 // Both conditions must be false for the loop to exit.
2956 assert(L->contains(TBB) && "Loop block has no successor in loop!");
2957 if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
2958 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2959 if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
2960 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
2963 return BackedgeTakenInfo(BECount, MaxBECount);
2967 // With an icmp, it may be feasible to compute an exact backedge-taken count.
2968 // Proceed to the next level to examine the icmp.
2969 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
2970 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
2972 // If it's not an integer or pointer comparison then compute it the hard way.
2973 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
2976 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
2977 /// backedge of the specified loop will execute if its exit condition
2978 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
2979 ScalarEvolution::BackedgeTakenInfo
2980 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
2985 // If the condition was exit on true, convert the condition to exit on false
2986 ICmpInst::Predicate Cond;
2987 if (!L->contains(FBB))
2988 Cond = ExitCond->getPredicate();
2990 Cond = ExitCond->getInversePredicate();
2992 // Handle common loops like: for (X = "string"; *X; ++X)
2993 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
2994 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
2995 const SCEV* ItCnt =
2996 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
2997 if (!isa<SCEVCouldNotCompute>(ItCnt)) {
2998 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
2999 return BackedgeTakenInfo(ItCnt,
3000 isa<SCEVConstant>(ItCnt) ? ItCnt :
3001 getConstant(APInt::getMaxValue(BitWidth)-1));
3005 const SCEV* LHS = getSCEV(ExitCond->getOperand(0));
3006 const SCEV* RHS = getSCEV(ExitCond->getOperand(1));
3008 // Try to evaluate any dependencies out of the loop.
3009 LHS = getSCEVAtScope(LHS, L);
3010 RHS = getSCEVAtScope(RHS, L);
3012 // At this point, we would like to compute how many iterations of the
3013 // loop the predicate will return true for these inputs.
3014 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3015 // If there is a loop-invariant, force it into the RHS.
3016 std::swap(LHS, RHS);
3017 Cond = ICmpInst::getSwappedPredicate(Cond);
3020 // If we have a comparison of a chrec against a constant, try to use value
3021 // ranges to answer this query.
3022 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3023 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3024 if (AddRec->getLoop() == L) {
3025 // Form the constant range.
3026 ConstantRange CompRange(
3027 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3029 const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3030 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3031 }
3033 switch (Cond) {
3034 case ICmpInst::ICMP_NE: { // while (X != Y)
3035 // Convert to: while (X-Y != 0)
3036 const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3037 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3040 case ICmpInst::ICMP_EQ: {
3041 // Convert to: while (X-Y == 0) // while (X == Y)
3042 const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3043 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3046 case ICmpInst::ICMP_SLT: {
3047 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3048 if (BTI.hasAnyInfo()) return BTI;
3051 case ICmpInst::ICMP_SGT: {
3052 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3053 getNotSCEV(RHS), L, true);
3054 if (BTI.hasAnyInfo()) return BTI;
3057 case ICmpInst::ICMP_ULT: {
3058 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3059 if (BTI.hasAnyInfo()) return BTI;
3062 case ICmpInst::ICMP_UGT: {
3063 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3064 getNotSCEV(RHS), L, false);
3065 if (BTI.hasAnyInfo()) return BTI;
3070 errs() << "ComputeBackedgeTakenCount ";
3071 if (ExitCond->getOperand(0)->getType()->isUnsigned())
3072 errs() << "[unsigned] ";
3073 errs() << *LHS << " "
3074 << Instruction::getOpcodeName(Instruction::ICmp)
3075 << " " << *RHS << "\n";
3079 return
3080 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3083 static ConstantInt *
3084 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3085 ScalarEvolution &SE) {
3086 const SCEV* InVal = SE.getConstant(C);
3087 const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE);
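// For example, evaluating the affine recurrence {3,+,5} at iteration 4
// yields the constant 3 + 5*4 = 23.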
3088 assert(isa<SCEVConstant>(Val) &&
3089 "Evaluation of SCEV at constant didn't fold correctly?");
3090 return cast<SCEVConstant>(Val)->getValue();
3093 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3094 /// and a GEP expression (missing the pointer index) indexing into it, return
3095 /// the addressed element of the initializer or null if the index expression is
3096 /// invalid.
3097 static Constant *
3098 GetAddressedElementFromGlobal(GlobalVariable *GV,
3099 const std::vector<ConstantInt*> &Indices) {
3100 Constant *Init = GV->getInitializer();
3101 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3102 uint64_t Idx = Indices[i]->getZExtValue();
3103 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3104 assert(Idx < CS->getNumOperands() && "Bad struct index!");
3105 Init = cast<Constant>(CS->getOperand(Idx));
3106 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3107 if (Idx >= CA->getNumOperands()) return 0; // Bogus program
3108 Init = cast<Constant>(CA->getOperand(Idx));
3109 } else if (isa<ConstantAggregateZero>(Init)) {
3110 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3111 assert(Idx < STy->getNumElements() && "Bad struct index!");
3112 Init = Constant::getNullValue(STy->getElementType(Idx));
3113 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3114 if (Idx >= ATy->getNumElements()) return 0; // Bogus program
3115 Init = Constant::getNullValue(ATy->getElementType());
3117 assert(0 && "Unknown constant aggregate type!");
3121       return 0; // Unknown initializer type
3122   }
3123   return Init;
3127 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3128 /// 'icmp op load X, cst', try to see if we can compute the backedge
3129 /// execution count.
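///
/// For example, a loop of the form
///   for (i = 0; Table[i] != 7; ++i) {}
/// where Table is a constant global array with an initializer can have its
/// backedge count found by walking the initializer one element per iteration.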
3130 const SCEV *
3131 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3132                                                 LoadInst *LI,
3133                                                 Constant *RHS,
3134                                                 const Loop *L,
3135                                                 ICmpInst::Predicate predicate) {
3136 if (LI->isVolatile()) return CouldNotCompute;
3138 // Check to see if the loaded pointer is a getelementptr of a global.
3139 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3140 if (!GEP) return CouldNotCompute;
3142 // Make sure that it is really a constant global we are gepping, with an
3143 // initializer, and make sure the first IDX is really 0.
3144 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3145 if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3146 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3147 !cast<Constant>(GEP->getOperand(1))->isNullValue())
3148 return CouldNotCompute;
3150   // Okay, we allow one non-constant index into the GEP instruction.
3151   Value *VarIdx = 0;
3152   std::vector<ConstantInt*> Indexes;
3153 unsigned VarIdxNum = 0;
3154 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3155 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3156 Indexes.push_back(CI);
3157 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3158 if (VarIdx) return CouldNotCompute; // Multiple non-constant idx's.
3159 VarIdx = GEP->getOperand(i);
3160       VarIdxNum = i-2;
3161       Indexes.push_back(0);
3164 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3165 // Check to see if X is a loop variant variable value now.
3166 const SCEV* Idx = getSCEV(VarIdx);
3167 Idx = getSCEVAtScope(Idx, L);
3169 // We can only recognize very limited forms of loop index expressions, in
3170 // particular, only affine AddRec's like {C1,+,C2}.
3171 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3172 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3173 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3174 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3175 return CouldNotCompute;
3177 unsigned MaxSteps = MaxBruteForceIterations;
3178 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3179 ConstantInt *ItCst =
3180 ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3181 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3183 // Form the GEP offset.
3184 Indexes[VarIdxNum] = Val;
3186 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3187 if (Result == 0) break; // Cannot compute!
3189 // Evaluate the condition for this iteration.
3190 Result = ConstantExpr::getICmp(predicate, Result, RHS);
3191 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
3192 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3194 errs() << "\n***\n*** Computed loop count " << *ItCst
3195 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3198 ++NumArrayLenItCounts;
3199 return getConstant(ItCst); // Found terminating iteration!
3202 return CouldNotCompute;
3206 /// CanConstantFold - Return true if we can constant fold an instruction of the
3207 /// specified type, assuming that all operands were constants.
3208 static bool CanConstantFold(const Instruction *I) {
3209 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3210       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3211     return true;
3213 if (const CallInst *CI = dyn_cast<CallInst>(I))
3214 if (const Function *F = CI->getCalledFunction())
3215       return canConstantFoldCallTo(F);
3216   return false;
3219 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3220 /// in the loop that V is derived from. We allow arbitrary operations along the
3221 /// way, but the operands of an operation must either be constants or a value
3222 /// derived from a constant PHI. If this expression does not fit with these
3223 /// constraints, return null.
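///
/// For example, given the loop body
///   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add i32 %i, 1
/// the add is built only from the header PHI and a constant, so %i is the
/// constant evolving PHI returned for %i.next.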
3224 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3225 // If this is not an instruction, or if this is an instruction outside of the
3226 // loop, it can't be derived from a loop PHI.
3227 Instruction *I = dyn_cast<Instruction>(V);
3228 if (I == 0 || !L->contains(I->getParent())) return 0;
3230 if (PHINode *PN = dyn_cast<PHINode>(I)) {
3231     if (L->getHeader() == I->getParent())
3232       return PN;
3234 // We don't currently keep track of the control flow needed to evaluate
3235 // PHIs, so we cannot handle PHIs inside of loops.
3239 // If we won't be able to constant fold this expression even if the operands
3240 // are constants, return early.
3241 if (!CanConstantFold(I)) return 0;
3243 // Otherwise, we can evaluate this instruction if all of its operands are
3244   // constant or derived from a PHI node themselves.
3245   PHINode *PHI = 0;
3246   for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3247     if (!(isa<Constant>(I->getOperand(Op)) ||
3248           isa<GlobalValue>(I->getOperand(Op)))) {
3249       PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3250       if (P == 0) return 0;  // Not evolving from PHI
3251       if (PHI == 0)
3252         PHI = P;
3253       else if (PHI != P)
3254         return 0;  // Evolving from multiple different PHIs.
3255     }
3257   // This is an expression evolving from a constant PHI!
3258   return PHI;
3259 }
3261 /// EvaluateExpression - Given an expression that passes the
3262 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3263 /// in the loop has the value PHIVal. If we can't fold this expression for some
3264 /// reason, return null.
3265 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3266 if (isa<PHINode>(V)) return PHIVal;
3267 if (Constant *C = dyn_cast<Constant>(V)) return C;
3268 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3269 Instruction *I = cast<Instruction>(V);
3271 std::vector<Constant*> Operands;
3272 Operands.resize(I->getNumOperands());
3274 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3275 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3276 if (Operands[i] == 0) return 0;
3279 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3280 return ConstantFoldCompareInstOperands(CI->getPredicate(),
3281 &Operands[0], Operands.size());
3283 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3284 &Operands[0], Operands.size());
3287 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3288 /// in the header of its containing loop, we know the loop executes a
3289 /// constant number of times, and the PHI node is just a recurrence
3290 /// involving constants, fold it.
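///
/// For example, for a header PHI that starts at 0 and adds 3 on each
/// backedge, a backedge-taken count of 4 folds to the exit value
/// 0 + 3*4 = 12 by evaluating the recurrence four times.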
3291 Constant *
3292 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3293                                                    const APInt &BEs,
3294                                                    const Loop *L) {
3295 std::map<PHINode*, Constant*>::iterator I =
3296 ConstantEvolutionLoopExitValue.find(PN);
3297   if (I != ConstantEvolutionLoopExitValue.end())
3298     return I->second;
3300 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3301 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
3303 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3305 // Since the loop is canonicalized, the PHI node must have two entries. One
3306 // entry must be a constant (coming in from outside of the loop), and the
3307 // second must be derived from the same PHI.
3308 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3309 Constant *StartCST =
3310     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3311   if (StartCST == 0)
3312     return RetVal = 0;  // Must be a constant.
3314 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3315   PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3316   if (PN2 != PN)
3317     return RetVal = 0;  // Not derived from same PHI.
3319 // Execute the loop symbolically to determine the exit value.
3320 if (BEs.getActiveBits() >= 32)
3321 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3323 unsigned NumIterations = BEs.getZExtValue(); // must be in range
3324 unsigned IterationNum = 0;
3325 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3326 if (IterationNum == NumIterations)
3327 return RetVal = PHIVal; // Got exit value!
3329 // Compute the value of the PHI node for the next iteration.
3330 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3331 if (NextPHI == PHIVal)
3332 return RetVal = NextPHI; // Stopped evolving!
3334 return 0; // Couldn't evaluate!
3339 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3340 /// constant number of times (the condition evolves only from constants),
3341 /// try to evaluate a few iterations of the loop until the exit condition
3342 /// gets a value of ExitWhen (true or false). If we cannot evaluate the
3343 /// trip count of the loop, return CouldNotCompute.
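///
/// The evaluation is capped at MaxBruteForceIterations, so a condition that
/// only reaches ExitWhen after more iterations than that is reported as
/// CouldNotCompute.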
3344 const SCEV *
3345 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3346                                                        Value *Cond,
3347                                                        bool ExitWhen) {
3348 PHINode *PN = getConstantEvolvingPHI(Cond, L);
3349 if (PN == 0) return CouldNotCompute;
3351 // Since the loop is canonicalized, the PHI node must have two entries. One
3352 // entry must be a constant (coming in from outside of the loop), and the
3353 // second must be derived from the same PHI.
3354 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3355 Constant *StartCST =
3356 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3357 if (StartCST == 0) return CouldNotCompute; // Must be a constant.
3359 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3360 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3361 if (PN2 != PN) return CouldNotCompute; // Not derived from same PHI.
3363   // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3364   // the loop symbolically to determine when the condition gets a value of
3365   // ExitWhen.
3366 unsigned IterationNum = 0;
3367 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
3368 for (Constant *PHIVal = StartCST;
3369 IterationNum != MaxIterations; ++IterationNum) {
3370 ConstantInt *CondVal =
3371 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3373 // Couldn't symbolically evaluate.
3374 if (!CondVal) return CouldNotCompute;
3376 if (CondVal->getValue() == uint64_t(ExitWhen)) {
3377 ConstantEvolutionLoopExitValue[PN] = PHIVal;
3378 ++NumBruteForceTripCountsComputed;
3379 return getConstant(Type::Int32Ty, IterationNum);
3382 // Compute the value of the PHI node for the next iteration.
3383 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3384 if (NextPHI == 0 || NextPHI == PHIVal)
3385 return CouldNotCompute; // Couldn't evaluate or not making progress...
3389 // Too many iterations were needed to evaluate.
3390 return CouldNotCompute;
3393 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3394 /// at the specified scope in the program. The L value specifies the loop
3395 /// nest at which to evaluate the expression; null means the top-level
3396 /// scope, outside of all loops.
3398 /// This method can be used to compute the exit value for a variable defined
3399 /// in a loop by querying what the value will hold in the parent loop.
3401 /// In the case that a relevant loop exit value cannot be computed, the
3402 /// original value V is returned.
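///
/// For example, for the inner-loop recurrence {0,+,1} in a loop whose
/// backedge-taken count is the constant 9, evaluating at the parent loop's
/// scope folds the recurrence to the constant 9.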
3403 const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3404 // FIXME: this should be turned into a virtual method on SCEV!
3406 if (isa<SCEVConstant>(V)) return V;
3408 // If this instruction is evolved from a constant-evolving PHI, compute the
3409 // exit value from the loop without using SCEVs.
3410 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3411 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3412 const Loop *LI = (*this->LI)[I->getParent()];
3413 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
3414 if (PHINode *PN = dyn_cast<PHINode>(I))
3415 if (PN->getParent() == LI->getHeader()) {
3416 // Okay, there is no closed form solution for the PHI node. Check
3417 // to see if the loop that contains it has a known backedge-taken
3418           // count. If so, we may be able to force computation of the exit
3419           // value.
3420 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI);
3421 if (const SCEVConstant *BTCC =
3422 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3423 // Okay, we know how many times the containing loop executes. If
3424 // this is a constant evolving PHI node, get the final value at
3425 // the specified iteration number.
3426 Constant *RV = getConstantEvolutionLoopExitValue(PN,
3427                                                 BTCC->getValue()->getValue(),
3428                                                 LI);
3429 if (RV) return getUnknown(RV);
3433 // Okay, this is an expression that we cannot symbolically evaluate
3434 // into a SCEV. Check to see if it's possible to symbolically evaluate
3435 // the arguments into constants, and if so, try to constant propagate the
3436 // result. This is particularly useful for computing loop exit values.
3437 if (CanConstantFold(I)) {
3438 // Check to see if we've folded this instruction at this loop before.
3439 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3440 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3441         Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3442       if (!Pair.second)
3443         return Pair.first->second ? &*getUnknown(Pair.first->second) : V;
3445 std::vector<Constant*> Operands;
3446 Operands.reserve(I->getNumOperands());
3447 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3448 Value *Op = I->getOperand(i);
3449 if (Constant *C = dyn_cast<Constant>(Op)) {
3450 Operands.push_back(C);
3452 // If any of the operands is non-constant and if they are
3453 // non-integer and non-pointer, don't even try to analyze them
3454           // with SCEV techniques.
3455           if (!isSCEVable(Op->getType()))
3456             return V;
3458 const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L);
3459 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3460 Constant *C = SC->getValue();
3461 if (C->getType() != Op->getType())
3462 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3466 Operands.push_back(C);
3467 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3468 if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3469 if (C->getType() != Op->getType())
3471 ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3475 Operands.push_back(C);
3485 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3486 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3487 &Operands[0], Operands.size());
3489 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3490 &Operands[0], Operands.size());
3491 Pair.first->second = C;
3492 return getUnknown(C);
3496 // This is some other type of SCEVUnknown, just return it.
3500 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3501 // Avoid performing the look-up in the common case where the specified
3502 // expression has no loop-variant portions.
3503 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3504 const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3505 if (OpAtScope != Comm->getOperand(i)) {
3506 // Okay, at least one of these operands is loop variant but might be
3507 // foldable. Build a new instance of the folded commutative expression.
3508 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3509 Comm->op_begin()+i);
3510 NewOps.push_back(OpAtScope);
3512 for (++i; i != e; ++i) {
3513 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3514 NewOps.push_back(OpAtScope);
3516 if (isa<SCEVAddExpr>(Comm))
3517 return getAddExpr(NewOps);
3518 if (isa<SCEVMulExpr>(Comm))
3519 return getMulExpr(NewOps);
3520 if (isa<SCEVSMaxExpr>(Comm))
3521 return getSMaxExpr(NewOps);
3522 if (isa<SCEVUMaxExpr>(Comm))
3523 return getUMaxExpr(NewOps);
3524 assert(0 && "Unknown commutative SCEV type!");
3527     // If we got here, all operands are loop invariant.
3528     return Comm;
3531 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3532 const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L);
3533 const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L);
3534 if (LHS == Div->getLHS() && RHS == Div->getRHS())
3535 return Div; // must be loop invariant
3536 return getUDivExpr(LHS, RHS);
3539 // If this is a loop recurrence for a loop that does not contain L, then we
3540 // are dealing with the final value computed by the loop.
3541 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3542 if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3543 // To evaluate this recurrence, we need to know how many times the AddRec
3544 // loop iterates. Compute this now.
3545 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3546 if (BackedgeTakenCount == CouldNotCompute) return AddRec;
3548 // Then, evaluate the AddRec.
3549 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3554 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3555 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
3556 if (Op == Cast->getOperand())
3557 return Cast; // must be loop invariant
3558 return getZeroExtendExpr(Op, Cast->getType());
3561 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3562 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
3563 if (Op == Cast->getOperand())
3564 return Cast; // must be loop invariant
3565 return getSignExtendExpr(Op, Cast->getType());
3568 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3569 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
3570 if (Op == Cast->getOperand())
3571 return Cast; // must be loop invariant
3572 return getTruncateExpr(Op, Cast->getType());
3575 assert(0 && "Unknown SCEV type!");
3579 /// getSCEVAtScope - This is a convenience function which does
3580 /// getSCEVAtScope(getSCEV(V), L).
3581 const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3582 return getSCEVAtScope(getSCEV(V), L);
3585 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3586 /// following equation:
3588 /// A * X = B (mod N)
3590 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3591 /// A and B isn't important.
3593 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
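///
/// Worked example: with BW = 8, A = 4 and B = 8, we get D = gcd(4, 2^8) = 4,
/// B is divisible by D, and the reduced equation 1 * X = 2 (mod 64) has the
/// minimum unsigned root X = 2, which indeed satisfies 4 * 2 = 8.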
3594 static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3595 ScalarEvolution &SE) {
3596 uint32_t BW = A.getBitWidth();
3597 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3598 assert(A != 0 && "A must be non-zero.");
3600   // 1. D = gcd(A, N)
3601   //
3602   // The gcd of A and N may have only one prime factor: 2. The number of
3603   // trailing zeros in A is its multiplicity.
3604 uint32_t Mult2 = A.countTrailingZeros();
3607 // 2. Check if B is divisible by D.
3609 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3610 // is not less than multiplicity of this prime factor for D.
3611 if (B.countTrailingZeros() < Mult2)
3612 return SE.getCouldNotCompute();
3614   // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3615   // modulo (N / D).
3617 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
3618 // bit width during computations.
3619 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
3620 APInt Mod(BW + 1, 0);
3621 Mod.set(BW - Mult2); // Mod = N / D
3622 APInt I = AD.multiplicativeInverse(Mod);
3624 // 4. Compute the minimum unsigned root of the equation:
3625 // I * (B / D) mod (N / D)
3626 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3628   // The result is guaranteed to be less than 2^BW so we may truncate it to BW
3629   // bits.
3630 return SE.getConstant(Result.trunc(BW));
3633 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3634 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
3635 /// might be the same) or two SCEVCouldNotCompute objects.
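///
/// Worked example: the chrec {0,+,1,+,2} evaluates to 0 + 1*n + 2*n(n-1)/2
/// = n^2 at iteration n, so the polynomial coefficients below are A = N/2 = 1,
/// B = M - N/2 = 0 and C = L = 0, giving a double root at zero.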
3637 static std::pair<const SCEV*,const SCEV*>
3638 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3639 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
3640 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
3641 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
3642 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
3644 // We currently can only solve this if the coefficients are constants.
3645 if (!LC || !MC || !NC) {
3646 const SCEV *CNC = SE.getCouldNotCompute();
3647 return std::make_pair(CNC, CNC);
3650 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
3651 const APInt &L = LC->getValue()->getValue();
3652 const APInt &M = MC->getValue()->getValue();
3653 const APInt &N = NC->getValue()->getValue();
3654 APInt Two(BitWidth, 2);
3655 APInt Four(BitWidth, 4);
3658     using namespace APIntOps;
3659     const APInt &C = L;
3660     // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
3661     // The B coefficient is M-N/2
3662     APInt B(M);
3663     B -= N.sdiv(Two);
3665 // The A coefficient is N/2
3666 APInt A(N.sdiv(Two));
3668 // Compute the B^2-4ac term.
3669     APInt SqrtTerm(B);
3670     SqrtTerm *= B;
3671     SqrtTerm -= Four * (A * C);
3673 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
3674 // integer value or else APInt::sqrt() will assert.
3675 APInt SqrtVal(SqrtTerm.sqrt());
3677 // Compute the two solutions for the quadratic formula.
3678 // The divisions must be performed as signed divisions.
3679     APInt NegB(-B);
3680     APInt TwoA( A << 1 );
3681 if (TwoA.isMinValue()) {
3682 const SCEV *CNC = SE.getCouldNotCompute();
3683 return std::make_pair(CNC, CNC);
3686 ConstantInt *Solution1 = ConstantInt::get((NegB + SqrtVal).sdiv(TwoA));
3687 ConstantInt *Solution2 = ConstantInt::get((NegB - SqrtVal).sdiv(TwoA));
3689 return std::make_pair(SE.getConstant(Solution1),
3690 SE.getConstant(Solution2));
3691 } // end APIntOps namespace
3694 /// HowFarToZero - Return the number of times a backedge comparing the specified
3695 /// value to zero will execute. If not computable, return CouldNotCompute.
3696 const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
3697 // If the value is a constant
3698 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3699 // If the value is already zero, the branch will execute zero times.
3700 if (C->getValue()->isZero()) return C;
3701 return CouldNotCompute; // Otherwise it will loop infinitely.
3704 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
3705 if (!AddRec || AddRec->getLoop() != L)
3706 return CouldNotCompute;
3708 if (AddRec->isAffine()) {
3709 // If this is an affine expression, the execution count of this branch is
3710 // the minimum unsigned root of the following equation:
3712 // Start + Step*N = 0 (mod 2^BW)
3716 // Step*N = -Start (mod 2^BW)
3718 // where BW is the common bit width of Start and Step.
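    //
    // For example, {10,+,-1} hits zero after N = 10 steps, which is just the
    // Start value; this is the -1 stride special case handled below.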
3720 // Get the initial value for the loop.
3721 const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
3722 L->getParentLoop());
3723 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
3724 L->getParentLoop());
3726 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
3727 // For now we handle only constant steps.
3729 // First, handle unitary steps.
3730 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
3731 return getNegativeSCEV(Start); // N = -Start (as unsigned)
3732 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
3733 return Start; // N = Start (as unsigned)
3735 // Then, try to solve the above equation provided that Start is constant.
3736 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
3737 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
3738 -StartC->getValue()->getValue(),
3741 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
3742 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
3743 // the quadratic equation to solve it.
3744     std::pair<const SCEV*,const SCEV*> Roots = SolveQuadraticEquation(AddRec,
3745                                                                       *this);
3746 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
3747 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
3750 errs() << "HFTZ: " << *V << " - sol#1: " << *R1
3751 << " sol#2: " << *R2 << "\n";
3753 // Pick the smallest positive root value.
3754 if (ConstantInt *CB =
3755 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
3756 R1->getValue(), R2->getValue()))) {
3757 if (CB->getZExtValue() == false)
3758 std::swap(R1, R2); // R1 is the minimum root now.
3760 // We can only use this value if the chrec ends up with an exact zero
3761 // value at this index. When solving for "X*X != 5", for example, we
3762 // should not accept a root of 2.
3763 const SCEV* Val = AddRec->evaluateAtIteration(R1, *this);
3764         if (Val->isZero())
3765           return R1;  // We found a quadratic root!
3770 return CouldNotCompute;
3773 /// HowFarToNonZero - Return the number of times a backedge checking the
3774 /// specified value for nonzero will execute. If not computable, return
3775 /// CouldNotCompute.
3776 const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
3777 // Loops that look like: while (X == 0) are very strange indeed. We don't
3778 // handle them yet except for the trivial case. This could be expanded in the
3779 // future as needed.
3781 // If the value is a constant, check to see if it is known to be non-zero
3782 // already. If so, the backedge will execute zero times.
3783 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3784 if (!C->getValue()->isNullValue())
3785 return getIntegerSCEV(0, C->getType());
3786 return CouldNotCompute; // Otherwise it will loop infinitely.
3789 // We could implement others, but I really doubt anyone writes loops like
3790 // this, and if they did, they would already be constant folded.
3791 return CouldNotCompute;
3794 /// getLoopPredecessor - If the given loop's header has exactly one unique
3795 /// predecessor outside the loop, return it. Otherwise return null.
3797 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
3798 BasicBlock *Header = L->getHeader();
3799 BasicBlock *Pred = 0;
3800   for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
3801        PI != E; ++PI)
3802     if (!L->contains(*PI)) {
3803       if (Pred && Pred != *PI) return 0; // Multiple predecessors.
3804       Pred = *PI;
3805     }
3806   return Pred;
3809 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
3810 /// (which may not be an immediate predecessor) which has exactly one
3811 /// successor from which BB is reachable, or null if no such block is
3815 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
3816 // If the block has a unique predecessor, then there is no path from the
3817 // predecessor to the block that does not go through the direct edge
3818 // from the predecessor to the block.
3819 if (BasicBlock *Pred = BB->getSinglePredecessor())
3822 // A loop's header is defined to be a block that dominates the loop.
3823 // If the header has a unique predecessor outside the loop, it must be
3824 // a block that has exactly one successor that can reach the loop.
3825 if (Loop *L = LI->getLoopFor(BB))
3826 return getLoopPredecessor(L);
3831 /// HasSameValue - SCEV structural equivalence is usually sufficient for
3832 /// testing whether two expressions are equal, however for the purposes of
3833 /// looking for a condition guarding a loop, it can be useful to be a little
3834 /// more general, since a front-end may have replicated the controlling
3835 /// expression.
3837 static bool HasSameValue(const SCEV* A, const SCEV* B) {
3838 // Quick check to see if they are the same SCEV.
3839 if (A == B) return true;
3841 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
3842 // two different instructions with the same value. Check for this case.
3843 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
3844 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
3845 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
3846 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
3847             if (AI->isIdenticalTo(BI))
3848               return true;
3850   // Otherwise assume they may have a different value.
3851   return false;
3854 /// isLoopGuardedByCond - Test whether entry to the loop is protected by
3855 /// a conditional between LHS and RHS. This is used to help avoid max
3856 /// expressions in loop trip counts.
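///
/// For example, if the only way into the loop is a branch taken when
/// "n > 0", then a query with the same predicate on the SCEVs of n and 0
/// returns true, and callers can avoid introducing a max with the start
/// value when computing the trip count.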
3857 bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
3858 ICmpInst::Predicate Pred,
3859 const SCEV *LHS, const SCEV *RHS) {
3860 // Interpret a null as meaning no loop, where there is obviously no guard
3861 // (interprocedural conditions notwithstanding).
3862 if (!L) return false;
3864 BasicBlock *Predecessor = getLoopPredecessor(L);
3865 BasicBlock *PredecessorDest = L->getHeader();
3867 // Starting at the loop predecessor, climb up the predecessor chain, as long
3868 // as there are predecessors that can be found that have unique successors
3869 // leading to the original header.
3870   for (; Predecessor;
3871        PredecessorDest = Predecessor,
3872 Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
3874 BranchInst *LoopEntryPredicate =
3875 dyn_cast<BranchInst>(Predecessor->getTerminator());
3876 if (!LoopEntryPredicate ||
3877 LoopEntryPredicate->isUnconditional())
3880 if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
3881                         LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
3882       return true;
3888 /// isNecessaryCond - Test whether the given CondValue value is a condition
3889 /// which is at least as strict as the one described by Pred, LHS, and RHS.
3890 bool ScalarEvolution::isNecessaryCond(Value *CondValue,
3891 ICmpInst::Predicate Pred,
3892 const SCEV *LHS, const SCEV *RHS,
3894   // Recursively handle And and Or conditions.
3895 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
3896 if (BO->getOpcode() == Instruction::And) {
3897       if (!Inverse)
3898         return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
3899 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
3900 } else if (BO->getOpcode() == Instruction::Or) {
3901       if (Inverse)
3902         return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
3903 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
3907 ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
3908 if (!ICI) return false;
3910 // Now that we found a conditional branch that dominates the loop, check to
3911 // see if it is the comparison we are looking for.
3912 Value *PreCondLHS = ICI->getOperand(0);
3913 Value *PreCondRHS = ICI->getOperand(1);
3914 ICmpInst::Predicate Cond;
3915   if (Inverse)
3916     Cond = ICI->getInversePredicate();
3917   else
3918     Cond = ICI->getPredicate();
3920   if (Cond == Pred)
3921     ; // An exact match.
3922 else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
3923 ; // The actual condition is beyond sufficient.
3924   else
3925     // Check a few special cases.
3926     switch (Cond) {
3927 case ICmpInst::ICMP_UGT:
3928 if (Pred == ICmpInst::ICMP_ULT) {
3929 std::swap(PreCondLHS, PreCondRHS);
3930 Cond = ICmpInst::ICMP_ULT;
3934 case ICmpInst::ICMP_SGT:
3935 if (Pred == ICmpInst::ICMP_SLT) {
3936 std::swap(PreCondLHS, PreCondRHS);
3937 Cond = ICmpInst::ICMP_SLT;
3941 case ICmpInst::ICMP_NE:
3942 // Expressions like (x >u 0) are often canonicalized to (x != 0),
3943 // so check for this case by checking if the NE is comparing against
3944 // a minimum or maximum constant.
3945 if (!ICmpInst::isTrueWhenEqual(Pred))
3946 if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
3947 const APInt &A = CI->getValue();
3949 case ICmpInst::ICMP_SLT:
3950 if (A.isMaxSignedValue()) break;
3952 case ICmpInst::ICMP_SGT:
3953 if (A.isMinSignedValue()) break;
3955 case ICmpInst::ICMP_ULT:
3956 if (A.isMaxValue()) break;
3958 case ICmpInst::ICMP_UGT:
3959 if (A.isMinValue()) break;
3964 Cond = ICmpInst::ICMP_NE;
3965 // NE is symmetric but the original comparison may not be. Swap
3966 // the operands if necessary so that they match below.
3967 if (isa<SCEVConstant>(LHS))
3968 std::swap(PreCondLHS, PreCondRHS);
3973       // We weren't able to reconcile the condition.
3974       return false;
3977 if (!PreCondLHS->getType()->isInteger()) return false;
3979 const SCEV *PreCondLHSSCEV = getSCEV(PreCondLHS);
3980 const SCEV *PreCondRHSSCEV = getSCEV(PreCondRHS);
3981 return (HasSameValue(LHS, PreCondLHSSCEV) &&
3982 HasSameValue(RHS, PreCondRHSSCEV)) ||
3983 (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
3984 HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)));
3987 /// getBECount - Subtract the end and start values and divide by the step,
3988 /// rounding up, to get the number of times the backedge is executed. Return
3989 /// CouldNotCompute if an intermediate computation overflows.
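///
/// For example, with Start = 0, End = 10 and Step = 3 the computed count is
/// (10 - 0 + (3 - 1)) /u 3 = 4.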
3990 const SCEV* ScalarEvolution::getBECount(const SCEV* Start,
3991                                         const SCEV* End,
3992                                         const SCEV* Step) {
3993 const Type *Ty = Start->getType();
3994 const SCEV* NegOne = getIntegerSCEV(-1, Ty);
3995 const SCEV* Diff = getMinusSCEV(End, Start);
3996 const SCEV* RoundUp = getAddExpr(Step, NegOne);
3998 // Add an adjustment to the difference between End and Start so that
3999 // the division will effectively round up.
4000 const SCEV* Add = getAddExpr(Diff, RoundUp);
4002 // Check Add for unsigned overflow.
4003 // TODO: More sophisticated things could be done here.
4004 const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1);
4005 const SCEV* OperandExtendedAdd =
4006 getAddExpr(getZeroExtendExpr(Diff, WideTy),
4007 getZeroExtendExpr(RoundUp, WideTy));
4008 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4009 return CouldNotCompute;
4011 return getUDivExpr(Add, Step);
4014 /// HowManyLessThans - Return the number of times a backedge containing the
4015 /// specified less-than comparison will execute. If not computable, return
4016 /// CouldNotCompute.
4017 ScalarEvolution::BackedgeTakenInfo
4018 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4019 const Loop *L, bool isSigned) {
4020 // Only handle: "ADDREC < LoopInvariant".
4021 if (!RHS->isLoopInvariant(L)) return CouldNotCompute;
4023 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4024 if (!AddRec || AddRec->getLoop() != L)
4025 return CouldNotCompute;
4027 if (AddRec->isAffine()) {
4028 // FORNOW: We only support unit strides.
4029 unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4030 const SCEV* Step = AddRec->getStepRecurrence(*this);
4032 // TODO: handle non-constant strides.
4033 const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4034 if (!CStep || CStep->isZero())
4035 return CouldNotCompute;
4036 if (CStep->isOne()) {
4037 // With unit stride, the iteration never steps past the limit value.
4038 } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4039 if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4040         // Test whether a positive iteration can step past the limit
4041         // value and past the maximum value for its type in a single step.
4042         if (isSigned) {
4043           APInt Max = APInt::getSignedMaxValue(BitWidth);
4044 if ((Max - CStep->getValue()->getValue())
4045 .slt(CLimit->getValue()->getValue()))
4046 return CouldNotCompute;
4047         } else {
4048           APInt Max = APInt::getMaxValue(BitWidth);
4049 if ((Max - CStep->getValue()->getValue())
4050 .ult(CLimit->getValue()->getValue()))
4051 return CouldNotCompute;
4054 // TODO: handle non-constant limit values below.
4055 return CouldNotCompute;
4057 // TODO: handle negative strides below.
4058 return CouldNotCompute;
4060 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4061 // m. So, we count the number of iterations in which {n,+,s} < m is true.
4062 // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4063 // treat m-n as signed nor unsigned due to overflow possibility.
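    //
    // For example, with n = 0, s = 1 and m = 10, the comparison {0,+,1} < 10
    // holds for iterations 0 through 9, i.e. exactly 10 iterations.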
4065 // First, we get the value of the LHS in the first iteration: n
4066 const SCEV* Start = AddRec->getOperand(0);
4068 // Determine the minimum constant start value.
4069 const SCEV *MinStart = isa<SCEVConstant>(Start) ? Start :
4070 getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
4071 APInt::getMinValue(BitWidth));
4073 // If we know that the condition is true in order to enter the loop,
4074 // then we know that it will run exactly (m-n)/s times. Otherwise, we
4075 // only know that it will execute (max(m,n)-n)/s times. In both cases,
4076 // the division must round up.
4077 const SCEV* End = RHS;
4078 if (!isLoopGuardedByCond(L,
4079 isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
4080 getMinusSCEV(Start, Step), RHS))
4081 End = isSigned ? getSMaxExpr(RHS, Start)
4082 : getUMaxExpr(RHS, Start);
4084 // Determine the maximum constant end value.
4085 const SCEV* MaxEnd =
4086 isa<SCEVConstant>(End) ? End :
4087 getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
4088 .ashr(GetMinSignBits(End) - 1) :
4089 APInt::getMaxValue(BitWidth)
4090 .lshr(GetMinLeadingZeros(End)));
4092 // Finally, we subtract these two values and divide, rounding up, to get
4093 // the number of times the backedge is executed.
4094 const SCEV* BECount = getBECount(Start, End, Step);
4096 // The maximum backedge count is similar, except using the minimum start
4097 // value and the maximum end value.
4098     const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step);
4100 return BackedgeTakenInfo(BECount, MaxBECount);
4103 return CouldNotCompute;
4106 /// getNumIterationsInRange - Return the number of iterations of this loop that
4107 /// produce values in the specified constant range. Another way of looking at
4108 /// this is that it returns the first iteration number where the value is not
4109 /// in the range, thus computing the exit count. If the iteration count can't
4110 /// be computed, an instance of SCEVCouldNotCompute is returned.
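///
/// For example, asking how long {0,+,2} stays inside the unsigned range
/// [0,7) yields 4: iterations 0 through 3 produce 0, 2, 4 and 6, while
/// iteration 4 produces 8, the first value outside the range.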
4111 const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4112 ScalarEvolution &SE) const {
4113 if (Range.isFullSet()) // Infinite loop.
4114 return SE.getCouldNotCompute();
4116 // If the start is a non-zero constant, shift the range to simplify things.
4117 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4118 if (!SC->getValue()->isZero()) {
4119 SmallVector<const SCEV*, 4> Operands(op_begin(), op_end());
4120 Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4121 const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop());
4122 if (const SCEVAddRecExpr *ShiftedAddRec =
4123 dyn_cast<SCEVAddRecExpr>(Shifted))
4124 return ShiftedAddRec->getNumIterationsInRange(
4125 Range.subtract(SC->getValue()->getValue()), SE);
4126 // This is strange and shouldn't happen.
4127 return SE.getCouldNotCompute();
4130 // The only time we can solve this is when we have all constant indices.
4131 // Otherwise, we cannot determine the overflow conditions.
4132 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4133 if (!isa<SCEVConstant>(getOperand(i)))
4134 return SE.getCouldNotCompute();
4137 // Okay at this point we know that all elements of the chrec are constants and
4138 // that the start element is zero.
4140   // First check to see if the range contains zero. If not, the first
4141   // iteration exits.
4142 unsigned BitWidth = SE.getTypeSizeInBits(getType());
4143 if (!Range.contains(APInt(BitWidth, 0)))
4144 return SE.getIntegerSCEV(0, getType());
4146   if (isAffine()) {
4147     // If this is an affine expression then we have this situation:
4148 // Solve {0,+,A} in Range === Ax in Range
4150 // We know that zero is in the range. If A is positive then we know that
4151 // the upper value of the range must be the first possible exit value.
4152 // If A is negative then the lower of the range is the last possible loop
4153 // value. Also note that we already checked for a full range.
4154 APInt One(BitWidth,1);
4155 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4156 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4158 // The exit value should be (End+A)/A.
4159 APInt ExitVal = (End + A).udiv(A);
4160 ConstantInt *ExitValue = ConstantInt::get(ExitVal);
4162 // Evaluate at the exit value. If we really did fall out of the valid
4163 // range, then we computed our trip count, otherwise wrap around or other
4164 // things must have happened.
4165 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4166 if (Range.contains(Val->getValue()))
4167 return SE.getCouldNotCompute(); // Something strange happened
4169 // Ensure that the previous value is in the range. This is a sanity check.
4170 assert(Range.contains(
4171 EvaluateConstantChrecAtConstant(this,
4172 ConstantInt::get(ExitVal - One), SE)->getValue()) &&
4173 "Linear scev computation is off in a bad way!");
4174 return SE.getConstant(ExitValue);
4175 } else if (isQuadratic()) {
4176 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4177 // quadratic equation to solve it. To do this, we must frame our problem in
4178 // terms of figuring out when zero is crossed, instead of when
4179 // Range.getUpper() is crossed.
4180 SmallVector<const SCEV*, 4> NewOps(op_begin(), op_end());
4181 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4182 const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4184 // Next, solve the constructed addrec
4185 std::pair<const SCEV*,const SCEV*> Roots =
4186 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4187 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4188 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4190 // Pick the smallest positive root value.
4191 if (ConstantInt *CB =
4192 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4193 R1->getValue(), R2->getValue()))) {
4194 if (CB->getZExtValue() == false)
4195 std::swap(R1, R2); // R1 is the minimum root now.
4197 // Make sure the root is not off by one. The returned iteration should
4198 // not be in the range, but the previous one should be. When solving
4199 // for "X*X < 5", for example, we should not return a root of 2.
4200       ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4201                                                            R1->getValue(),
4202                                                            SE);
4203 if (Range.contains(R1Val->getValue())) {
4204 // The next iteration must be out of the range...
4205 ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()+1);
4207 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4208 if (!Range.contains(R1Val->getValue()))
4209 return SE.getConstant(NextVal);
4210 return SE.getCouldNotCompute(); // Something strange happened
4213 // If R1 was not in the range, then it is a good return value. Make
4214 // sure that R1-1 WAS in the range though, just in case.
4215 ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()-1);
4216 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4217       if (Range.contains(R1Val->getValue()))
4218         return R1;
4219 return SE.getCouldNotCompute(); // Something strange happened
4224 return SE.getCouldNotCompute();
4229 //===----------------------------------------------------------------------===//
4230 // SCEVCallbackVH Class Implementation
4231 //===----------------------------------------------------------------------===//
4233 void ScalarEvolution::SCEVCallbackVH::deleted() {
4234   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4235 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4236 SE->ConstantEvolutionLoopExitValue.erase(PN);
4237 if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4238 SE->ValuesAtScopes.erase(I);
4239 SE->Scalars.erase(getValPtr());
4240 // this now dangles!
4243 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4244   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4246 // Forget all the expressions associated with users of the old value,
4247 // so that future queries will recompute the expressions using the new
4249 SmallVector<User *, 16> Worklist;
4250 Value *Old = getValPtr();
4251 bool DeleteOld = false;
4252 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4254 Worklist.push_back(*UI);
4255 while (!Worklist.empty()) {
4256 User *U = Worklist.pop_back_val();
4257 // Deleting the Old value will cause this to dangle. Postpone
4258 // that until everything else is done.
4263 if (PHINode *PN = dyn_cast<PHINode>(U))
4264 SE->ConstantEvolutionLoopExitValue.erase(PN);
4265 if (Instruction *I = dyn_cast<Instruction>(U))
4266 SE->ValuesAtScopes.erase(I);
4267 if (SE->Scalars.erase(U))
4268 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4270 Worklist.push_back(*UI);
4273 if (PHINode *PN = dyn_cast<PHINode>(Old))
4274 SE->ConstantEvolutionLoopExitValue.erase(PN);
4275 if (Instruction *I = dyn_cast<Instruction>(Old))
4276 SE->ValuesAtScopes.erase(I);
4277 SE->Scalars.erase(Old);
4278 // this now dangles!
4283 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4284 : CallbackVH(V), SE(se) {}
4286 //===----------------------------------------------------------------------===//
4287 // ScalarEvolution Class Implementation
4288 //===----------------------------------------------------------------------===//
4290 ScalarEvolution::ScalarEvolution()
4291 : FunctionPass(&ID), CouldNotCompute(new SCEVCouldNotCompute()) {
4294 bool ScalarEvolution::runOnFunction(Function &F) {
4296 LI = &getAnalysis<LoopInfo>();
4297 TD = getAnalysisIfAvailable<TargetData>();
4301 void ScalarEvolution::releaseMemory() {
4303 BackedgeTakenCounts.clear();
4304 ConstantEvolutionLoopExitValue.clear();
4305 ValuesAtScopes.clear();
4307 for (std::map<ConstantInt*, SCEVConstant*>::iterator
4308 I = SCEVConstants.begin(), E = SCEVConstants.end(); I != E; ++I)
4310 for (std::map<std::pair<const SCEV*, const Type*>,
4311 SCEVTruncateExpr*>::iterator I = SCEVTruncates.begin(),
4312 E = SCEVTruncates.end(); I != E; ++I)
4314 for (std::map<std::pair<const SCEV*, const Type*>,
4315 SCEVZeroExtendExpr*>::iterator I = SCEVZeroExtends.begin(),
4316 E = SCEVZeroExtends.end(); I != E; ++I)
4318 for (std::map<std::pair<unsigned, std::vector<const SCEV*> >,
4319 SCEVCommutativeExpr*>::iterator I = SCEVCommExprs.begin(),
4320 E = SCEVCommExprs.end(); I != E; ++I)
4322 for (std::map<std::pair<const SCEV*, const SCEV*>, SCEVUDivExpr*>::iterator
4323 I = SCEVUDivs.begin(), E = SCEVUDivs.end(); I != E; ++I)
4325 for (std::map<std::pair<const SCEV*, const Type*>,
4326 SCEVSignExtendExpr*>::iterator I = SCEVSignExtends.begin(),
4327 E = SCEVSignExtends.end(); I != E; ++I)
4329 for (std::map<std::pair<const Loop *, std::vector<const SCEV*> >,
4330 SCEVAddRecExpr*>::iterator I = SCEVAddRecExprs.begin(),
4331 E = SCEVAddRecExprs.end(); I != E; ++I)
4333 for (std::map<Value*, SCEVUnknown*>::iterator I = SCEVUnknowns.begin(),
4334 E = SCEVUnknowns.end(); I != E; ++I)
4337 SCEVConstants.clear();
4338 SCEVTruncates.clear();
4339 SCEVZeroExtends.clear();
4340 SCEVCommExprs.clear();
4341   SCEVUDivs.clear();
4342   SCEVSignExtends.clear();
4343 SCEVAddRecExprs.clear();
4344 SCEVUnknowns.clear();
4347 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4348 AU.setPreservesAll();
4349 AU.addRequiredTransitive<LoopInfo>();
4352 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4353 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4356 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4357                           const Loop *L) {
4358 // Print all inner loops first
4359 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4360 PrintLoopInfo(OS, SE, *I);
4362 OS << "Loop " << L->getHeader()->getName() << ": ";
4364 SmallVector<BasicBlock*, 8> ExitBlocks;
4365 L->getExitBlocks(ExitBlocks);
4366 if (ExitBlocks.size() != 1)
4367 OS << "<multiple exits> ";
4369 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4370 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4372 OS << "Unpredictable backedge-taken count. ";
4376 OS << "Loop " << L->getHeader()->getName() << ": ";
4378 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4379 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4381 OS << "Unpredictable max backedge-taken count. ";
4387 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4388   // ScalarEvolution's implementation of the print method is to print
4389 // out SCEV values of all instructions that are interesting. Doing
4390 // this potentially causes it to create new SCEV objects though,
4391 // which technically conflicts with the const qualifier. This isn't
4392 // observable from outside the class though (the hasSCEV function
4393 // notwithstanding), so casting away the const isn't dangerous.
4394 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4396 OS << "Classifying expressions for: " << F->getName() << "\n";
4397 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4398 if (isSCEVable(I->getType())) {
4401 const SCEV* SV = SE.getSCEV(&*I);
4404 const Loop *L = LI->getLoopFor((*I).getParent());
4406 const SCEV* AtUse = SE.getSCEVAtScope(SV, L);
4413 OS << "\t\t" "Exits: ";
4414 const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4415 if (!ExitValue->isLoopInvariant(L)) {
4416 OS << "<<Unknown>>";
4425 OS << "Determining loop execution counts for: " << F->getName() << "\n";
4426 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
4427 PrintLoopInfo(OS, &SE, *I);
4430 void ScalarEvolution::print(std::ostream &o, const Module *M) const {
4431 raw_os_ostream OS(o);