//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
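//
// (Illustrative note, not from the original header: because SCEVs are
// uniqued, building the expression "x + 1" twice yields the same SCEV
// pointer, so comparing two "const SCEV *" values with == is a complete
// structural-equality test.)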
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
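//
// (Illustrative example: for a loop such as
//    for (i = 0; i != n; ++i) { ... }
// the induction variable i is recognized as the polynomial recurrence
// {0,+,1}<loop>, and the loop's backedge-taken count folds to n.)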
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
#include <map>
using namespace llvm;
STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  const char *OpStr = getOperationStr();
  OS << "(";
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    OS << **I;
    if (llvm::next(I) != E)
      OS << OpStr;
  }
  OS << ")";
}
bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->properlyDominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}
bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of "properly dominates" query because
  // the instruction which produces the addrec's value is a PHI, and a PHI
  // effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, V, false);
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//
static bool CompareTypes(const Type *A, const Type *B) {
  if (A->getTypeID() != B->getTypeID())
    return A->getTypeID() < B->getTypeID();
  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
    const IntegerType *BI = cast<IntegerType>(B);
    return AI->getBitWidth() < BI->getBitWidth();
  }
  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
    const PointerType *BI = cast<PointerType>(B);
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
    const ArrayType *BI = cast<ArrayType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
    const VectorType *BI = cast<VectorType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const StructType *AI = dyn_cast<StructType>(A)) {
    const StructType *BI = cast<StructType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
  }
  return false;
}
namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LST = LHS->getSCEVType();
      unsigned RST = RHS->getSCEVType();
      if (LST != RST)
        return LST < RST;

      // Then, pick an arbitrary sort.  Use the profiling data for speed.
      const FoldingSetNodeIDRef &L = LHS->getProfile();
      const FoldingSetNodeIDRef &R = RHS->getProfile();
      size_t LSize = L.getSize();
      size_t RSize = R.getSize();
      if (LSize != RSize)
        return LSize < RSize;
      return memcmp(L.getData(), R.getData(),
                    LSize * sizeof(*L.getData())) < 0;
    }
  };
}
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
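///
/// (Illustrative example: an operand list such as [x+y, 2, x+y, 7] is
/// ordered to [2, 7, x+y, x+y]; constants sort first because they have the
/// smallest SCEV type, and the duplicate add expressions become adjacent so
/// callers can fold them.)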
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop

  SCEVComplexityCompare Comp(LI);

  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (Comp(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  std::stable_sort(Ops.begin(), Ops.end(), Comp);
}
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//
/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // compile time.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
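  //
  // (Worked example, not from the original comment: for K = 4 we have
  // K! = 24 = 2^3 * 3, so T = 3 and the odd factor K!/2^T is 3.  We compute
  // It*(It-1)*(It-2)*(It-3) at width W+3, shift right by 3, truncate to W
  // bits, and multiply by the multiplicative inverse of 3 mod 2^W.)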
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
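///
/// (Illustrative instances: {A,+,B} at iteration It evaluates to A + B*It,
/// and {A,+,B,+,C} evaluates to A + B*It + C*(It*(It-1)/2).)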
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map.  This is a helper function for getAddExpr.  As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed.  This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = NumOperands; i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}
/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getConstant(Ty, 2);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops, HasNUW, HasNSW);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getConstant(Ty, 0);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getConstant(Ty, 1);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
      // is not associative so this isn't necessarily safe.
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRecLoop == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVAddExpr *S =
    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                                           LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2)
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
                 E = Add->op_end(); I != E; ++I) {
            const SCEV *Mul = getMulExpr(Ops[0], *I);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps);
        }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      // It's tempting to propagate the NSW flag here, but nsw multiplication
      // is not associative so this isn't necessarily safe.
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
                                         HasNUW && AddRec->hasNoUnsignedWrap(),
                                         /*HasNSW=*/false);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}

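// A concrete instance of the F * G rule above (a sketch): with F = {1,+,2}
// and G = {3,+,4} over the same loop, the product recurrence starts at
// A*C = 1*3 = 3 and steps by F*D + G*B + B*D = 4*{1,+,2} + 2*{3,+,4} + 8,
// which the recursive getAddExpr/getMulExpr calls then fold further.
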
/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                            // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the dividend's operands.
      // TODO: Generalize this to non-constants by using known-bits information.
      const Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getValue()->getValue().isPowerOf2())
        ++MaxShiftAmt;
      const IntegerType *ExtTy =
        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
              dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
          if (!Step->getValue()->getValue()
                .urem(RHSC->getValue()->getValue()) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop())) {
            SmallVector<const SCEV *, 4> Operands;
            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
            return getAddRecExpr(Operands, AR->getLoop());
          }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }
      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

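// A small instance of the {X,+,N}/C fold above (a sketch; SE names an
// assumed ScalarEvolution reference and AR the recurrence {0,+,4} of type
// Ty): since 4 urem 2 == 0 and the zero-extend comparison shows the division
// cannot discard wrapped bits,
//
//   SE.getUDivExpr(AR, SE.getConstant(Ty, 2));
//
// folds to {0,+,2} instead of producing an opaque udiv expression.
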
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
                                           const SCEV *Step, const Loop *L,
                                           bool HasNUW, bool HasNSW) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.insert(Operands.end(), StepChrec->op_begin(),
                      StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L,
                               bool HasNUW, bool HasNSW) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
           getEffectiveSCEVType(Operands[0]->getType()) &&
           "SCEVAddRecExpr operand types don't match!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
      if (!isKnownNonNegative(Operands[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop->getHeader()) ?
        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
        (!NestedLoop->contains(L->getHeader()) &&
         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break
      // this requirement.
      bool AllInvariant = true;
      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
        if (!Operands[i]->isLoopInvariant(L)) {
          AllInvariant = false;
          break;
        }
      if (AllInvariant) {
        NestedOperands[0] = getAddRecExpr(Operands, L);
        AllInvariant = true;
        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
            AllInvariant = false;
            break;
          }
        if (AllInvariant)
          // Ok, both add recurrences are valid after the transformation.
          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = 0;
  SCEVAddRecExpr *S =
    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}

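// Two simple consequences of the folds above (a sketch, assuming an i32
// type Ty, a loop L, and an assumed ScalarEvolution reference SE): a zero
// step is stripped, so SE.getAddRecExpr(X, SE.getConstant(Ty, 0), L)
// returns X itself, and a step that is itself an addrec over L is flattened
// into a single recurrence with one more operand.
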
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one.  Since we sorted the list, these values are required
  // to be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X smax Y smax Y  -->  X smax Y
    //  X smax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

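// Example of the smax folds above (a sketch): smax(2, 7, X) first folds the
// two constants down to smax(7, X); had the remaining constant been the
// signed minimum it would have been stripped entirely, and duplicates such
// as smax(X, X, Y) collapse to smax(X, Y) once the list is sorted.
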
const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have a umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one.  Since we sorted the list, these values are required
  // to be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X umax Y umax Y  -->  X umax Y
    //  X umax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

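// Checking the complement identity above on small i8 values: for x = 5 and
// y = 9, ~x = -6 and ~y = -10 in two's complement, smax(-6, -10) = -6, and
// ~(-6) = 5, which is indeed smin(5, 9); the unsigned case works the same
// way with umax and umin.
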
const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
  // If we have TargetData, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD)
    return getConstant(TD->getIntPtrType(getContext()),
                       TD->getTypeAllocSize(AllocTy));

  Constant *C = ConstantExpr::getSizeOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

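// Usage sketch (assuming a 32-bit target with TargetData available, Ctx an
// LLVMContext, and SE an assumed ScalarEvolution reference):
//
//   const SCEV *Size = SE.getSizeOfExpr(Type::getInt64Ty(Ctx));
//
// yields the constant 8 in the pointer-sized type i32; without TargetData,
// the target-independent sizeof constant expression is folded as far as
// possible instead.
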
const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
  Constant *C = ConstantExpr::getAlignOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
                                             unsigned FieldNo) {
  // If we have TargetData, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD)
    return getConstant(TD->getIntPtrType(getContext()),
                       TD->getStructLayout(STy)->getElementOffset(FieldNo));

  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
                                             Constant *FieldNo) {
  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  // If we have a TargetData, use it!
  if (TD)
    return TD->getTypeSizeInBits(Ty);

  // Integer types have fixed sizes.
  if (Ty->isIntegerTy())
    return Ty->getPrimitiveSizeInBits();

  // The only other supported type is pointer. Without TargetData,
  // conservatively assume pointers are 64-bit.
  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
  return 64;
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  if (TD) return TD->getIntPtrType(getContext());

  // Without TargetData, conservatively assume pointers are 64-bit.
  return Type::getInt64Ty(getContext());
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return &CouldNotCompute;
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
  if (I != Scalars.end()) return I->second;
  const SCEV *S = createSCEV(V);
  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}

/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, Val));
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
                getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
                getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
                                          const SCEV *RHS) {
  // X - Y --> X + -Y
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}

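// These three helpers keep negation, complement, and subtraction inside the
// add/mul algebra (a sketch on i32 constants): getNegativeSCEV(7) is the
// constant -7 (folded from 7 * -1), getNotSCEV(7) is -8 (folded from
// -1 - 7), and getMinusSCEV(X, Y) is simply getAddExpr(X, getNegativeSCEV(Y)).
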
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is zero extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is sign extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// zero extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// sign extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended,
/// it is extended with unspecified bits.  The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// on them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// on them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

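// Sketch of the mismatched-type helpers above: with an i64 LHS and an i32
// RHS, the RHS is zero-extended to i64 and the umax/umin is computed at
// i64; when the RHS is at least as wide, the LHS instead goes through
// getNoopOrZeroExtend, whose assertion guarantees no truncation can occur.
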
/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
       UI != UE; ++UI)
    Worklist.push_back(cast<Instruction>(UI));
}

/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the Scalars map if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
      Scalars.find(static_cast<Value *>(I));
    if (It != Scalars.end()) {
      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (It->second != SymName && !It->second->hasOperand(SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(It->second) ||
          (I != PN && It->second == SymName)) {
        ValuesAtScopes.erase(It->second);
        Scalars.erase(It);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists
/// in a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const Loop *L = LI->getLoopFor(PN->getParent()))
    if (L->getHeader() == PN->getParent()) {
      // The loop may have multiple entrances or multiple exits; we can analyze
      // this phi as an addrec if it has a unique entry value and a unique
      // backedge value.
      Value *BEValueV = 0, *StartValueV = 0;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *V = PN->getIncomingValue(i);
        if (L->contains(PN->getIncomingBlock(i))) {
          if (!BEValueV) {
            BEValueV = V;
          } else if (BEValueV != V) {
            BEValueV = 0;
            break;
          }
        } else if (!StartValueV) {
          StartValueV = V;
        } else if (StartValueV != V) {
          StartValueV = 0;
          break;
        }
      }
      if (BEValueV && StartValueV) {
        // While we are analyzing this PHI node, handle its value symbolically.
        const SCEV *SymbolicName = getUnknown(PN);
        assert(Scalars.find(PN) == Scalars.end() &&
               "PHI node already processed?");
        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming
        // around the loop.
        const SCEV *BEValue = getSCEV(BEValueV);

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the symbolic
        // value we just inserted, then we found a simple induction variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace it
          // with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV *, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV *Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (Accum->isLoopInvariant(L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              bool HasNUW = false;
              bool HasNSW = false;

              // If the increment doesn't overflow, then neither the addrec
              // nor the post-increment will overflow.
              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
                if (OBO->hasNoUnsignedWrap())
                  HasNUW = true;
                if (OBO->hasNoSignedWrap())
                  HasNSW = true;
              }

              const SCEV *StartVal = getSCEV(StartValueV);
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);

              // Since the no-wrap flags are on the increment, they apply to
              // the post-incremented value as well.
              if (Accum->isLoopInvariant(L))
                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
                                    Accum, L, HasNUW, HasNSW);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1}  and BEValue is j.
          // Because the other in-value of i (0) fits the evolution of BEValue
          // i really is an addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV *StartVal = getSCEV(StartValueV);

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial value of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        }
      }
    }

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = PN->hasConstantValue(DT)) {
    bool AllSameLoop = true;
    Loop *PNLoop = LI->getLoopFor(PN->getParent());
    for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
        AllSameLoop = false;
        break;
      }
    if (AllSameLoop)
      return getSCEV(V);
  }

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

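// The canonical pattern the code above recognizes (an IR sketch, not taken
// from this file):
//
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add nsw i32 %i, 1
//
// Analyzing %i.next against the symbolic name for %i leaves the accumulator
// 1, so %i becomes {0,+,1}<loop>, and the nsw flag on the increment carries
// over to the recurrence.
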
/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {

  bool InBounds = GEP->isInBounds();
  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
    return getUnknown(GEP);
  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      TotalOffset = getAddExpr(TotalOffset,
                               getOffsetOfExpr(STy, FieldNo),
                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV *LocalOffset = getSCEV(Index);
      // Getelementptr indices are signed.
      LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
      // Lower "inbounds" GEPs to NSW arithmetic.
      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
      TotalOffset = getAddExpr(TotalOffset, LocalOffset,
                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
    }
  }
  return getAddExpr(getSCEV(Base), TotalOffset,
                    /*HasNUW=*/false, /*HasNSW=*/InBounds);
}

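// Sketch of the lowering (assuming 32-bit pointers):
//
//   %p = getelementptr inbounds [10 x i32]* %a, i32 0, i32 %i
//
// contributes 0 * sizeof([10 x i32]) for the first index and
// sizeof(i32) * %i = 4 * %i for the second, so %p becomes (%a + 4 * %i),
// with nsw on the adds and muls because the GEP is inbounds.
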
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration).  It is, at the same time,
/// the minimum number of times S is divisible by 2.  For example, given
/// {4,+,8} it returns 2.  If S is guaranteed to be 0, it returns the
/// bitwidth of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}

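// Concrete cases of the rules above: for the recurrence {4,+,8} the result
// is min(TZ(4), TZ(8)) = 2, matching the doc comment's example, while for a
// product the counts add, e.g. TZ(4 * X) >= 2 + TZ(X), clamped to the
// bitwidth.
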
/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return ConstantRange(C->getValue()->getValue());

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum unsigned value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getMinValue(BitWidth),
                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getUnsignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getUnsignedRange(Add->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getUnsignedRange(UDiv->getLHS());
    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
    return ConservativeResult.intersectWith(X.udiv(Y));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(ZExt->getOperand());
    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(SExt->getOperand());
    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getUnsignedRange(Trunc->getOperand());
    return ConservativeResult.intersectWith(X.truncate(BitWidth));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult =
            ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        ConstantRange StartRange = getUnsignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code.
        ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtMaxBECountRange =
          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
            ExtEndRange)
          return ConservativeResult;

        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
                                   EndRange.getUnsignedMin());
        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
                                   EndRange.getUnsignedMax());
        if (Min.isMinValue() && Max.isMaxValue())
          return ConservativeResult;
        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
      }
    }

    return ConservativeResult;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
    if (Ones == ~Zeros + 1)
      return ConservativeResult;
    return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
  }

  return ConservativeResult;
}

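// Example of the affine addrec case above (a sketch): for {0,+,1}<L> in i32
// with a maximum backedge-taken count of 99, the start range is [0,1), the
// end range is [99,100), no overflow is detected at the extended width, and
// the result is the enclosing range [0,100).
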
/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return ConstantRange(C->getValue()->getValue());

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum signed value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getSignedMinValue(BitWidth),
                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getSignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getSignedRange(Add->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getSignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getSignedRange(Mul->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getSignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getSignedRange(SMax->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getSignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getSignedRange(UMax->getOperand(i)));
    return ConservativeResult.intersectWith(X);
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getSignedRange(UDiv->getLHS());
    ConstantRange Y = getSignedRange(UDiv->getRHS());
    return ConservativeResult.intersectWith(X.udiv(Y));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getSignedRange(ZExt->getOperand());
    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getSignedRange(SExt->getOperand());
    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getSignedRange(Trunc->getOperand());
    return ConservativeResult.intersectWith(X.truncate(BitWidth));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt(BitWidth, 0),
                        APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth),
                        APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        ConstantRange StartRange = getSignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code.
        ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtMaxBECountRange =
          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
            ExtEndRange)
          return ConservativeResult;

        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
                                   EndRange.getSignedMin());
        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
                                   EndRange.getSignedMax());
        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
          return ConservativeResult;
        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
      }
    }

    return ConservativeResult;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    if (!U->getValue()->getType()->isIntegerTy() && !TD)
      return ConservativeResult;
    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
    if (NS == 1)
      return ConservativeResult;
    return ConservativeResult.intersectWith(
      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
  }

  return ConservativeResult;
}

3065 /// createSCEV - We know that there is no SCEV for the specified value.
3066 /// Analyze the expression.
3068 const SCEV *ScalarEvolution::createSCEV(Value *V) {
3069 if (!isSCEVable(V->getType()))
3070 return getUnknown(V);
3072 unsigned Opcode = Instruction::UserOp1;
3073 if (Instruction *I = dyn_cast<Instruction>(V)) {
3074 Opcode = I->getOpcode();
3076 // Don't attempt to analyze instructions in blocks that aren't
3077 // reachable. Such instructions don't matter, and they aren't required
3078 // to obey basic rules for definitions dominating uses which this
3079 // analysis depends on.
3080 if (!DT->isReachableFromEntry(I->getParent()))
3081 return getUnknown(V);
3082 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3083 Opcode = CE->getOpcode();
3084 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3085 return getConstant(CI);
3086 else if (isa<ConstantPointerNull>(V))
3087 return getConstant(V->getType(), 0);
3088 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3089 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3091 return getUnknown(V);
3093 Operator *U = cast<Operator>(V);
3095 case Instruction::Add:
3096 // Don't transfer the NSW and NUW bits from the Add instruction to the
3097 // Add expression, because the Instruction may be guarded by control
3098 // flow and the no-overflow bits may not be valid for the expression in
3100 return getAddExpr(getSCEV(U->getOperand(0)),
3101 getSCEV(U->getOperand(1)));
3102 case Instruction::Mul:
3103 // Don't transfer the NSW and NUW bits from the Mul instruction to the
3104 // Mul expression, as with Add.
3105 return getMulExpr(getSCEV(U->getOperand(0)),
3106 getSCEV(U->getOperand(1)));
3107 case Instruction::UDiv:
3108 return getUDivExpr(getSCEV(U->getOperand(0)),
3109 getSCEV(U->getOperand(1)));
3110 case Instruction::Sub:
3111 return getMinusSCEV(getSCEV(U->getOperand(0)),
3112 getSCEV(U->getOperand(1)));
3113 case Instruction::And:
3114 // For an expression like x&255 that merely masks off the high bits,
3115 // use zext(trunc(x)) as the SCEV expression.
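// For example (illustrative IR): with %x of type i32,
//   %m = and i32 %x, 255
// is modeled as (zext i8 (trunc i8 %x) to i32), since the mask keeps
// exactly the low 8 bits.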
3116 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3117 if (CI->isNullValue())
3118 return getSCEV(U->getOperand(1));
3119 if (CI->isAllOnesValue())
3120 return getSCEV(U->getOperand(0));
3121 const APInt &A = CI->getValue();
3123 // Instcombine's ShrinkDemandedConstant may strip bits out of
3124 // constants, obscuring what would otherwise be a low-bits mask.
3125 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3126 // knew about to reconstruct a low-bits mask value.
3127 unsigned LZ = A.countLeadingZeros();
3128 unsigned BitWidth = A.getBitWidth();
3129 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3130 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3131 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3133 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3135 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3136 return
3137 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3138 IntegerType::get(getContext(), BitWidth - LZ)),
3139 U->getType());
3140 }
3141 break;
3143 case Instruction::Or:
3144 // If the RHS of the Or is a constant, we may have something like:
3145 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
3146 // optimizations will transparently handle this case.
3148 // In order for this transformation to be safe, the LHS must be of the
3149 // form X*(2^n) and the Or constant must be less than 2^n.
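// For example (illustrative IR): if %t = mul i32 %x, 4, then
//   %r = or i32 %t, 1
// is modeled as %t + 1, because the low two bits of %t are known zero
// and the or cannot carry into the higher bits.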
3150 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3151 const SCEV *LHS = getSCEV(U->getOperand(0));
3152 const APInt &CIVal = CI->getValue();
3153 if (GetMinTrailingZeros(LHS) >=
3154 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3155 // Build a plain add SCEV.
3156 const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3157 // If the LHS of the add was an addrec and it has no-wrap flags,
3158 // transfer the no-wrap flags, since an or won't introduce a wrap.
3159 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3160 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3161 if (OldAR->hasNoUnsignedWrap())
3162 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3163 if (OldAR->hasNoSignedWrap())
3164 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3165 }
3166 return S;
3167 }
3168 }
3169 break;
3170 case Instruction::Xor:
3171 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3172 // If the RHS of the xor is a signbit, then this is just an add.
3173 // Instcombine turns add of signbit into xor as a strength reduction step.
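// For example, on i8 values, (x ^ 0x80) and (x + 0x80) are the same
// operation: both just flip the sign bit, so the xor can be modeled
// as an add.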
3174 if (CI->getValue().isSignBit())
3175 return getAddExpr(getSCEV(U->getOperand(0)),
3176 getSCEV(U->getOperand(1)));
3178 // If the RHS of xor is -1, then this is a not operation.
3179 if (CI->isAllOnesValue())
3180 return getNotSCEV(getSCEV(U->getOperand(0)));
3182 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3183 // This is a variant of the check for xor with -1, and it handles
3184 // the case where instcombine has trimmed non-demanded bits out
3185 // of an xor with -1.
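// For example (illustrative IR): given
//   %t = and i32 %x, 255
//   %r = xor i32 %t, 255
// %t is modeled as (zext i8 (trunc i8 %x) to i32), so %r becomes
// (zext i8 (not (trunc i8 %x)) to i32).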
3186 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3187 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3188 if (BO->getOpcode() == Instruction::And &&
3189 LCI->getValue() == CI->getValue())
3190 if (const SCEVZeroExtendExpr *Z =
3191 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3192 const Type *UTy = U->getType();
3193 const SCEV *Z0 = Z->getOperand();
3194 const Type *Z0Ty = Z0->getType();
3195 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3197 // If C is a low-bits mask, the zero extend is serving to
3198 // mask off the high bits. Complement the operand and
3199 // re-apply the zext.
3200 if (APIntOps::isMask(Z0TySize, CI->getValue()))
3201 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3203 // If C is a single bit, it may be in the sign-bit position
3204 // before the zero-extend. In this case, represent the xor
3205 // using an add, which is equivalent, and re-apply the zext.
3206 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3207 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3208 Trunc.isSignBit())
3209 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3210 UTy);
3211 }
3212 }
3213 break;
3215 case Instruction::Shl:
3216 // Turn shift left of a constant amount into a multiply.
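// For example, %r = shl i32 %x, 3 is modeled as %x * 8.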
3217 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3218 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3220 // If the shift count is not less than the bitwidth, the result of
3221 // the shift is undefined. Don't try to analyze it, because the
3222 // resolution chosen here may differ from the resolution chosen in
3223 // other parts of the compiler.
3224 if (SA->getValue().uge(BitWidth))
3225 break;
3227 Constant *X = ConstantInt::get(getContext(),
3228 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3229 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3230 }
3231 break;
3233 case Instruction::LShr:
3234 // Turn logical shift right of a constant into an unsigned divide.
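// For example, %r = lshr i32 %x, 2 is modeled as %x /u 4.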
3235 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3236 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3238 // If the shift count is not less than the bitwidth, the result of
3239 // the shift is undefined. Don't try to analyze it, because the
3240 // resolution chosen here may differ from the resolution chosen in
3241 // other parts of the compiler.
3242 if (SA->getValue().uge(BitWidth))
3243 break;
3245 Constant *X = ConstantInt::get(getContext(),
3246 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3247 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3248 }
3249 break;
3251 case Instruction::AShr:
3252 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
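// For example, on i32, ((%x << 24) >>s 24) is modeled as
// (sext i8 (trunc i8 %x) to i32).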
3253 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3254 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3255 if (L->getOpcode() == Instruction::Shl &&
3256 L->getOperand(1) == U->getOperand(1)) {
3257 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3259 // If the shift count is not less than the bitwidth, the result of
3260 // the shift is undefined. Don't try to analyze it, because the
3261 // resolution chosen here may differ from the resolution chosen in
3262 // other parts of the compiler.
3263 if (CI->getValue().uge(BitWidth))
3264 break;
3266 uint64_t Amt = BitWidth - CI->getZExtValue();
3267 if (Amt == BitWidth)
3268 return getSCEV(L->getOperand(0)); // shift by zero --> noop
3269 return
3270 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3271 IntegerType::get(getContext(),
3272 Amt)),
3273 U->getType());
3274 }
3275 break;
3277 case Instruction::Trunc:
3278 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3280 case Instruction::ZExt:
3281 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3283 case Instruction::SExt:
3284 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3286 case Instruction::BitCast:
3287 // BitCasts are no-op casts so we just eliminate the cast.
3288 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3289 return getSCEV(U->getOperand(0));
3290 break;
3292 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
3293 // lead to pointer expressions which cannot safely be expanded to GEPs,
3294 // because ScalarEvolution doesn't respect the GEP aliasing rules when
3295 // simplifying integer expressions.
3297 case Instruction::GetElementPtr:
3298 return createNodeForGEP(cast<GEPOperator>(U));
3300 case Instruction::PHI:
3301 return createNodeForPHI(cast<PHINode>(U));
3303 case Instruction::Select:
3304 // This could be a smax or umax that was lowered earlier.
3305 // Try to recover it.
3306 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3307 Value *LHS = ICI->getOperand(0);
3308 Value *RHS = ICI->getOperand(1);
3309 switch (ICI->getPredicate()) {
3310 case ICmpInst::ICMP_SLT:
3311 case ICmpInst::ICMP_SLE:
3312 std::swap(LHS, RHS);
3313 // fall through
3314 case ICmpInst::ICMP_SGT:
3315 case ICmpInst::ICMP_SGE:
3316 // a >s b ? a+x : b+x -> smax(a, b)+x
3317 // a >s b ? b+x : a+x -> smin(a, b)+x
3318 if (LHS->getType() == U->getType()) {
3319 const SCEV *LS = getSCEV(LHS);
3320 const SCEV *RS = getSCEV(RHS);
3321 const SCEV *LA = getSCEV(U->getOperand(1));
3322 const SCEV *RA = getSCEV(U->getOperand(2));
3323 const SCEV *LDiff = getMinusSCEV(LA, LS);
3324 const SCEV *RDiff = getMinusSCEV(RA, RS);
3325 if (LDiff == RDiff)
3326 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3327 LDiff = getMinusSCEV(LA, RS);
3328 RDiff = getMinusSCEV(RA, LS);
3329 if (LDiff == RDiff)
3330 return getAddExpr(getSMinExpr(LS, RS), LDiff);
3331 }
3332 break;
3333 case ICmpInst::ICMP_ULT:
3334 case ICmpInst::ICMP_ULE:
3335 std::swap(LHS, RHS);
3336 // fall through
3337 case ICmpInst::ICMP_UGT:
3338 case ICmpInst::ICMP_UGE:
3339 // a >u b ? a+x : b+x -> umax(a, b)+x
3340 // a >u b ? b+x : a+x -> umin(a, b)+x
3341 if (LHS->getType() == U->getType()) {
3342 const SCEV *LS = getSCEV(LHS);
3343 const SCEV *RS = getSCEV(RHS);
3344 const SCEV *LA = getSCEV(U->getOperand(1));
3345 const SCEV *RA = getSCEV(U->getOperand(2));
3346 const SCEV *LDiff = getMinusSCEV(LA, LS);
3347 const SCEV *RDiff = getMinusSCEV(RA, RS);
3348 if (LDiff == RDiff)
3349 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3350 LDiff = getMinusSCEV(LA, RS);
3351 RDiff = getMinusSCEV(RA, LS);
3352 if (LDiff == RDiff)
3353 return getAddExpr(getUMinExpr(LS, RS), LDiff);
3354 }
3355 break;
3356 case ICmpInst::ICMP_NE:
3357 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
3358 if (LHS->getType() == U->getType() &&
3359 isa<ConstantInt>(RHS) &&
3360 cast<ConstantInt>(RHS)->isZero()) {
3361 const SCEV *One = getConstant(LHS->getType(), 1);
3362 const SCEV *LS = getSCEV(LHS);
3363 const SCEV *LA = getSCEV(U->getOperand(1));
3364 const SCEV *RA = getSCEV(U->getOperand(2));
3365 const SCEV *LDiff = getMinusSCEV(LA, LS);
3366 const SCEV *RDiff = getMinusSCEV(RA, One);
3367 if (LDiff == RDiff)
3368 return getAddExpr(getUMaxExpr(LS, One), LDiff);
3369 }
3370 break;
3371 case ICmpInst::ICMP_EQ:
3372 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
3373 if (LHS->getType() == U->getType() &&
3374 isa<ConstantInt>(RHS) &&
3375 cast<ConstantInt>(RHS)->isZero()) {
3376 const SCEV *One = getConstant(LHS->getType(), 1);
3377 const SCEV *LS = getSCEV(LHS);
3378 const SCEV *LA = getSCEV(U->getOperand(1));
3379 const SCEV *RA = getSCEV(U->getOperand(2));
3380 const SCEV *LDiff = getMinusSCEV(LA, One);
3381 const SCEV *RDiff = getMinusSCEV(RA, LS);
3382 if (LDiff == RDiff)
3383 return getAddExpr(getUMaxExpr(LS, One), LDiff);
3384 }
3385 break;
3386 default:
3387 break;
3388 }
3389 }
3390 break;
3391 default: // We cannot analyze this expression.
3392 break;
3393 }
3395 return getUnknown(V);
3396 }
3400 //===----------------------------------------------------------------------===//
3401 // Iteration Count Computation Code
3404 /// getBackedgeTakenCount - If the specified loop has a predictable
3405 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3406 /// object. The backedge-taken count is the number of times the loop header
3407 /// will be branched to from within the loop. This is one less than the
3408 /// trip count of the loop, since it doesn't count the first iteration,
3409 /// when the header is branched to from outside the loop.
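/// For example, a loop whose body runs five times branches to the header
/// four times from within the loop, so its backedge-taken count is 4 and
/// its trip count is 5.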
3411 /// Note that it is not valid to call this method on a loop without a
3412 /// loop-invariant backedge-taken count (see
3413 /// hasLoopInvariantBackedgeTakenCount).
3415 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3416 return getBackedgeTakenInfo(L).Exact;
3417 }
3419 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3420 /// return the least SCEV value that is known never to be less than the
3421 /// actual backedge taken count.
3422 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3423 return getBackedgeTakenInfo(L).Max;
3424 }
3426 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
3427 /// onto the given Worklist.
3428 static void
3429 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3430 BasicBlock *Header = L->getHeader();
3432 // Push all Loop-header PHIs onto the Worklist stack.
3433 for (BasicBlock::iterator I = Header->begin();
3434 PHINode *PN = dyn_cast<PHINode>(I); ++I)
3435 Worklist.push_back(PN);
3436 }
3438 const ScalarEvolution::BackedgeTakenInfo &
3439 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3440 // Initially insert a CouldNotCompute for this loop. If the insertion
3441 // succeeds, proceed to actually compute a backedge-taken count and
3442 // update the value. The temporary CouldNotCompute value tells SCEV
3443 // code elsewhere that it shouldn't attempt to request a new
3444 // backedge-taken count, which could result in infinite recursion.
3445 std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3446 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3447 if (Pair.second) {
3448 BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3449 if (BECount.Exact != getCouldNotCompute()) {
3450 assert(BECount.Exact->isLoopInvariant(L) &&
3451 BECount.Max->isLoopInvariant(L) &&
3452 "Computed backedge-taken count isn't loop invariant for loop!");
3453 ++NumTripCountsComputed;
3455 // Update the value in the map.
3456 Pair.first->second = BECount;
3457 } else {
3458 if (BECount.Max != getCouldNotCompute())
3459 // Update the value in the map.
3460 Pair.first->second = BECount;
3461 if (isa<PHINode>(L->getHeader()->begin()))
3462 // Only count loops that have phi nodes as not being computable.
3463 ++NumTripCountsNotComputed;
3464 }
3466 // Now that we know more about the trip count for this loop, forget any
3467 // existing SCEV values for PHI nodes in this loop since they are only
3468 // conservative estimates made without the benefit of trip count
3469 // information. This is similar to the code in forgetLoop, except that
3470 // it handles SCEVUnknown PHI nodes specially.
3471 if (BECount.hasAnyInfo()) {
3472 SmallVector<Instruction *, 16> Worklist;
3473 PushLoopPHIs(L, Worklist);
3475 SmallPtrSet<Instruction *, 8> Visited;
3476 while (!Worklist.empty()) {
3477 Instruction *I = Worklist.pop_back_val();
3478 if (!Visited.insert(I)) continue;
3480 std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3481 Scalars.find(static_cast<Value *>(I));
3482 if (It != Scalars.end()) {
3483 // SCEVUnknown for a PHI either means that it has an unrecognized
3484 // structure, or it's a PHI that's in the process of being computed
3485 // by createNodeForPHI. In the former case, additional loop trip
3486 // count information isn't going to change anything. In the latter
3487 // case, createNodeForPHI will perform the necessary updates on its
3488 // own when it gets to that point.
3489 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3490 ValuesAtScopes.erase(It->second);
3491 Scalars.erase(It);
3492 }
3493 if (PHINode *PN = dyn_cast<PHINode>(I))
3494 ConstantEvolutionLoopExitValue.erase(PN);
3495 }
3497 PushDefUseChildren(I, Worklist);
3498 }
3499 }
3500 }
3501 return Pair.first->second;
3502 }
3504 /// forgetLoop - This method should be called by the client when it has
3505 /// changed a loop in a way that may affect ScalarEvolution's ability to
3506 /// compute a trip count, or if the loop is deleted.
3507 void ScalarEvolution::forgetLoop(const Loop *L) {
3508 // Drop any stored trip count value.
3509 BackedgeTakenCounts.erase(L);
3511 // Drop information about expressions based on loop-header PHIs.
3512 SmallVector<Instruction *, 16> Worklist;
3513 PushLoopPHIs(L, Worklist);
3515 SmallPtrSet<Instruction *, 8> Visited;
3516 while (!Worklist.empty()) {
3517 Instruction *I = Worklist.pop_back_val();
3518 if (!Visited.insert(I)) continue;
3520 std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3521 Scalars.find(static_cast<Value *>(I));
3522 if (It != Scalars.end()) {
3523 ValuesAtScopes.erase(It->second);
3524 Scalars.erase(It);
3525 if (PHINode *PN = dyn_cast<PHINode>(I))
3526 ConstantEvolutionLoopExitValue.erase(PN);
3527 }
3529 PushDefUseChildren(I, Worklist);
3530 }
3531 }
3533 /// forgetValue - This method should be called by the client when it has
3534 /// changed a value in a way that may affect its value, or which may
3535 /// disconnect it from a def-use chain linking it to a loop.
3536 void ScalarEvolution::forgetValue(Value *V) {
3537 Instruction *I = dyn_cast<Instruction>(V);
3538 if (!I) return;
3540 // Drop information about expressions based on loop-header PHIs.
3541 SmallVector<Instruction *, 16> Worklist;
3542 Worklist.push_back(I);
3544 SmallPtrSet<Instruction *, 8> Visited;
3545 while (!Worklist.empty()) {
3546 I = Worklist.pop_back_val();
3547 if (!Visited.insert(I)) continue;
3549 std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3550 Scalars.find(static_cast<Value *>(I));
3551 if (It != Scalars.end()) {
3552 ValuesAtScopes.erase(It->second);
3553 Scalars.erase(It);
3554 if (PHINode *PN = dyn_cast<PHINode>(I))
3555 ConstantEvolutionLoopExitValue.erase(PN);
3556 }
3558 PushDefUseChildren(I, Worklist);
3559 }
3560 }
3562 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
3563 /// of the specified loop will execute.
3564 ScalarEvolution::BackedgeTakenInfo
3565 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3566 SmallVector<BasicBlock *, 8> ExitingBlocks;
3567 L->getExitingBlocks(ExitingBlocks);
3569 // Examine all exits and pick the most conservative values.
3570 const SCEV *BECount = getCouldNotCompute();
3571 const SCEV *MaxBECount = getCouldNotCompute();
3572 bool CouldNotComputeBECount = false;
3573 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3574 BackedgeTakenInfo NewBTI =
3575 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3577 if (NewBTI.Exact == getCouldNotCompute()) {
3578 // We couldn't compute an exact value for this exit, so
3579 // we won't be able to compute an exact value for the loop.
3580 CouldNotComputeBECount = true;
3581 BECount = getCouldNotCompute();
3582 } else if (!CouldNotComputeBECount) {
3583 if (BECount == getCouldNotCompute())
3584 BECount = NewBTI.Exact;
3585 else
3586 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3587 }
3588 if (MaxBECount == getCouldNotCompute())
3589 MaxBECount = NewBTI.Max;
3590 else if (NewBTI.Max != getCouldNotCompute())
3591 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3592 }
3594 return BackedgeTakenInfo(BECount, MaxBECount);
3595 }
3597 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3598 /// of the specified loop will execute if it exits via the specified block.
3599 ScalarEvolution::BackedgeTakenInfo
3600 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3601 BasicBlock *ExitingBlock) {
3603 // Okay, we've chosen an exiting block. See what condition causes us to
3604 // exit at this block.
3606 // FIXME: we should be able to handle switch instructions (with a single exit)
3607 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3608 if (ExitBr == 0) return getCouldNotCompute();
3609 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3611 // At this point, we know we have a conditional branch that determines whether
3612 // the loop is exited. However, we don't know if the branch is executed each
3613 // time through the loop. If not, then the execution count of the branch will
3614 // not be equal to the trip count of the loop.
3616 // Currently we check for this by checking to see if the Exit branch goes to
3617 // the loop header. If so, we know it will always execute the same number of
3618 // times as the loop. We also handle the case where the exit block *is* the
3619 // loop header. This is common for un-rotated loops.
3621 // If both of those tests fail, walk up the unique predecessor chain to the
3622 // header, stopping if there is an edge that doesn't exit the loop. If the
3623 // header is reached, the execution count of the branch will be equal to the
3624 // trip count of the loop.
3626 // More extensive analysis could be done to handle more cases here.
3628 if (ExitBr->getSuccessor(0) != L->getHeader() &&
3629 ExitBr->getSuccessor(1) != L->getHeader() &&
3630 ExitBr->getParent() != L->getHeader()) {
3631 // The simple checks failed, try climbing the unique predecessor chain
3632 // up to the header.
3633 bool Ok = false;
3634 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3635 BasicBlock *Pred = BB->getUniquePredecessor();
3636 if (!Pred)
3637 return getCouldNotCompute();
3638 TerminatorInst *PredTerm = Pred->getTerminator();
3639 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3640 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3641 if (PredSucc == BB)
3642 continue;
3643 // If the predecessor has a successor that isn't BB and isn't
3644 // outside the loop, assume the worst.
3645 if (L->contains(PredSucc))
3646 return getCouldNotCompute();
3647 }
3648 if (Pred == L->getHeader()) {
3649 Ok = true;
3650 break;
3651 }
3652 BB = Pred;
3653 }
3654 if (!Ok)
3655 return getCouldNotCompute();
3656 }
3658 // Proceed to the next level to examine the exit condition expression.
3659 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3660 ExitBr->getSuccessor(0),
3661 ExitBr->getSuccessor(1));
3662 }
3664 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3665 /// backedge of the specified loop will execute if its exit condition
3666 /// were a conditional branch of ExitCond, TBB, and FBB.
3667 ScalarEvolution::BackedgeTakenInfo
3668 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3669 Value *ExitCond,
3670 BasicBlock *TBB,
3671 BasicBlock *FBB) {
3672 // Check if the controlling expression for this loop is an And or Or.
3673 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3674 if (BO->getOpcode() == Instruction::And) {
3675 // Recurse on the operands of the and.
3676 BackedgeTakenInfo BTI0 =
3677 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3678 BackedgeTakenInfo BTI1 =
3679 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3680 const SCEV *BECount = getCouldNotCompute();
3681 const SCEV *MaxBECount = getCouldNotCompute();
3682 if (L->contains(TBB)) {
3683 // Both conditions must be true for the loop to continue executing.
3684 // Choose the less conservative count.
3685 if (BTI0.Exact == getCouldNotCompute() ||
3686 BTI1.Exact == getCouldNotCompute())
3687 BECount = getCouldNotCompute();
3688 else
3689 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3690 if (BTI0.Max == getCouldNotCompute())
3691 MaxBECount = BTI1.Max;
3692 else if (BTI1.Max == getCouldNotCompute())
3693 MaxBECount = BTI0.Max;
3694 else
3695 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3696 } else {
3697 // Both conditions must be true for the loop to exit.
3698 assert(L->contains(FBB) && "Loop block has no successor in loop!");
3699 if (BTI0.Exact != getCouldNotCompute() &&
3700 BTI1.Exact != getCouldNotCompute())
3701 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3702 if (BTI0.Max != getCouldNotCompute() &&
3703 BTI1.Max != getCouldNotCompute())
3704 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3705 }
3707 return BackedgeTakenInfo(BECount, MaxBECount);
3708 }
3709 if (BO->getOpcode() == Instruction::Or) {
3710 // Recurse on the operands of the or.
3711 BackedgeTakenInfo BTI0 =
3712 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3713 BackedgeTakenInfo BTI1 =
3714 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3715 const SCEV *BECount = getCouldNotCompute();
3716 const SCEV *MaxBECount = getCouldNotCompute();
3717 if (L->contains(FBB)) {
3718 // Both conditions must be false for the loop to continue executing.
3719 // Choose the less conservative count.
3720 if (BTI0.Exact == getCouldNotCompute() ||
3721 BTI1.Exact == getCouldNotCompute())
3722 BECount = getCouldNotCompute();
3723 else
3724 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3725 if (BTI0.Max == getCouldNotCompute())
3726 MaxBECount = BTI1.Max;
3727 else if (BTI1.Max == getCouldNotCompute())
3728 MaxBECount = BTI0.Max;
3729 else
3730 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3731 } else {
3732 // Both conditions must be false for the loop to exit.
3733 assert(L->contains(TBB) && "Loop block has no successor in loop!");
3734 if (BTI0.Exact != getCouldNotCompute() &&
3735 BTI1.Exact != getCouldNotCompute())
3736 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3737 if (BTI0.Max != getCouldNotCompute() &&
3738 BTI1.Max != getCouldNotCompute())
3739 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3740 }
3742 return BackedgeTakenInfo(BECount, MaxBECount);
3743 }
3744 }
3746 // With an icmp, it may be feasible to compute an exact backedge-taken count.
3747 // Proceed to the next level to examine the icmp.
3748 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3749 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3751 // Check for a constant condition. These are normally stripped out by
3752 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
3753 // preserve the CFG and is temporarily leaving constant conditions
3754 // in place.
3755 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
3756 if (L->contains(FBB) == !CI->getZExtValue())
3757 // The backedge is always taken.
3758 return getCouldNotCompute();
3759 else
3760 // The backedge is never taken.
3761 return getConstant(CI->getType(), 0);
3762 }
3764 // If it's not an integer or pointer comparison then compute it the hard way.
3765 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3766 }
3768 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3769 /// backedge of the specified loop will execute if its exit condition
3770 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3771 ScalarEvolution::BackedgeTakenInfo
3772 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3773 ICmpInst *ExitCond,
3774 BasicBlock *TBB,
3775 BasicBlock *FBB) {
3777 // If the condition was exit on true, convert the condition to exit on false
3778 ICmpInst::Predicate Cond;
3779 if (!L->contains(FBB))
3780 Cond = ExitCond->getPredicate();
3781 else
3782 Cond = ExitCond->getInversePredicate();
3784 // Handle common loops like: for (X = "string"; *X; ++X)
3785 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3786 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3787 BackedgeTakenInfo ItCnt =
3788 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3789 if (ItCnt.hasAnyInfo())
3790 return ItCnt;
3791 }
3793 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3794 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3796 // Try to evaluate any dependencies out of the loop.
3797 LHS = getSCEVAtScope(LHS, L);
3798 RHS = getSCEVAtScope(RHS, L);
3800 // At this point, we would like to compute how many iterations of the
3801 // loop the predicate will return true for these inputs.
3802 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3803 // If there is a loop-invariant, force it into the RHS.
3804 std::swap(LHS, RHS);
3805 Cond = ICmpInst::getSwappedPredicate(Cond);
3806 }
3808 // Simplify the operands before analyzing them.
3809 (void)SimplifyICmpOperands(Cond, LHS, RHS);
3811 // If we have a comparison of a chrec against a constant, try to use value
3812 // ranges to answer this query.
3813 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3814 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3815 if (AddRec->getLoop() == L) {
3816 // Form the constant range.
3817 ConstantRange CompRange(
3818 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3820 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3821 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3822 }
3824 switch (Cond) {
3825 case ICmpInst::ICMP_NE: { // while (X != Y)
3826 // Convert to: while (X-Y != 0)
3827 BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3828 if (BTI.hasAnyInfo()) return BTI;
3829 break;
3830 }
3831 case ICmpInst::ICMP_EQ: { // while (X == Y)
3832 // Convert to: while (X-Y == 0)
3833 BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3834 if (BTI.hasAnyInfo()) return BTI;
3835 break;
3836 }
3837 case ICmpInst::ICMP_SLT: {
3838 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3839 if (BTI.hasAnyInfo()) return BTI;
3840 break;
3841 }
3842 case ICmpInst::ICMP_SGT: {
3843 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3844 getNotSCEV(RHS), L, true);
3845 if (BTI.hasAnyInfo()) return BTI;
3846 break;
3847 }
3848 case ICmpInst::ICMP_ULT: {
3849 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3850 if (BTI.hasAnyInfo()) return BTI;
3851 break;
3852 }
3853 case ICmpInst::ICMP_UGT: {
3854 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3855 getNotSCEV(RHS), L, false);
3856 if (BTI.hasAnyInfo()) return BTI;
3857 break;
3858 }
3859 default:
3860 #if 0
3861 dbgs() << "ComputeBackedgeTakenCount ";
3862 if (ExitCond->getOperand(0)->getType()->isUnsigned())
3863 dbgs() << "[unsigned] ";
3864 dbgs() << *LHS << " "
3865 << Instruction::getOpcodeName(Instruction::ICmp)
3866 << " " << *RHS << "\n";
3867 #endif
3868 break;
3869 }
3870 return
3871 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3872 }
3874 static ConstantInt *
3875 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3876 ScalarEvolution &SE) {
3877 const SCEV *InVal = SE.getConstant(C);
3878 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3879 assert(isa<SCEVConstant>(Val) &&
3880 "Evaluation of SCEV at constant didn't fold correctly?");
3881 return cast<SCEVConstant>(Val)->getValue();
3882 }
3884 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3885 /// and a GEP expression (missing the pointer index) indexing into it, return
3886 /// the addressed element of the initializer or null if the index expression is
3887 /// invalid.
3888 static Constant *
3889 GetAddressedElementFromGlobal(GlobalVariable *GV,
3890 const std::vector<ConstantInt*> &Indices) {
3891 Constant *Init = GV->getInitializer();
3892 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3893 uint64_t Idx = Indices[i]->getZExtValue();
3894 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3895 assert(Idx < CS->getNumOperands() && "Bad struct index!");
3896 Init = cast<Constant>(CS->getOperand(Idx));
3897 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3898 if (Idx >= CA->getNumOperands()) return 0; // Bogus program
3899 Init = cast<Constant>(CA->getOperand(Idx));
3900 } else if (isa<ConstantAggregateZero>(Init)) {
3901 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3902 assert(Idx < STy->getNumElements() && "Bad struct index!");
3903 Init = Constant::getNullValue(STy->getElementType(Idx));
3904 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3905 if (Idx >= ATy->getNumElements()) return 0; // Bogus program
3906 Init = Constant::getNullValue(ATy->getElementType());
3907 } else {
3908 llvm_unreachable("Unknown constant aggregate type!");
3909 }
3910 return 0;
3911 } else {
3912 return 0; // Unknown initializer type
3913 }
3914 }
3915 return Init;
3916 }
3918 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3919 /// 'icmp op load X, cst', try to see if we can compute the backedge
3920 /// execution count.
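/// For example, in a loop of the form
///   for (i = 0; table[i] != 0; ++i) ...
/// where table is a constant global array, the count can be found by
/// evaluating the comparison at successive constant indices.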
3921 ScalarEvolution::BackedgeTakenInfo
3922 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3923 LoadInst *LI,
3924 Constant *RHS,
3925 const Loop *L,
3926 ICmpInst::Predicate predicate) {
3927 if (LI->isVolatile()) return getCouldNotCompute();
3929 // Check to see if the loaded pointer is a getelementptr of a global.
3930 // TODO: Use SCEV instead of manually grubbing with GEPs.
3931 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3932 if (!GEP) return getCouldNotCompute();
3934 // Make sure that it is really a constant global we are gepping, with an
3935 // initializer, and make sure the first IDX is really 0.
3936 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3937 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
3938 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3939 !cast<Constant>(GEP->getOperand(1))->isNullValue())
3940 return getCouldNotCompute();
3942 // Okay, we allow one non-constant index into the GEP instruction.
3943 Value *VarIdx = 0;
3944 std::vector<ConstantInt*> Indexes;
3945 unsigned VarIdxNum = 0;
3946 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3947 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3948 Indexes.push_back(CI);
3949 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3950 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
3951 VarIdx = GEP->getOperand(i);
3952 VarIdxNum = i-2;
3953 Indexes.push_back(0);
3954 }
3956 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3957 // Check to see if X is a loop variant variable value now.
3958 const SCEV *Idx = getSCEV(VarIdx);
3959 Idx = getSCEVAtScope(Idx, L);
3961 // We can only recognize very limited forms of loop index expressions, in
3962 // particular, only affine AddRec's like {C1,+,C2}.
3963 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3964 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3965 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3966 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3967 return getCouldNotCompute();
3969 unsigned MaxSteps = MaxBruteForceIterations;
3970 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3971 ConstantInt *ItCst = ConstantInt::get(
3972 cast<IntegerType>(IdxExpr->getType()), IterationNum);
3973 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3975 // Form the GEP offset.
3976 Indexes[VarIdxNum] = Val;
3978 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3979 if (Result == 0) break; // Cannot compute!
3981 // Evaluate the condition for this iteration.
3982 Result = ConstantExpr::getICmp(predicate, Result, RHS);
3983 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
3984 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3985 #if 0
3986 dbgs() << "\n***\n*** Computed loop count " << *ItCst
3987 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3988 << "***\n";
3989 #endif
3990 ++NumArrayLenItCounts;
3991 return getConstant(ItCst); // Found terminating iteration!
3992 }
3993 }
3994 return getCouldNotCompute();
3995 }
3998 /// CanConstantFold - Return true if we can constant fold an instruction of the
3999 /// specified type, assuming that all operands were constants.
4000 static bool CanConstantFold(const Instruction *I) {
4001 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4002 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
4003 return true;
4005 if (const CallInst *CI = dyn_cast<CallInst>(I))
4006 if (const Function *F = CI->getCalledFunction())
4007 return canConstantFoldCallTo(F);
4008 return false;
4009 }
4011 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4012 /// in the loop that V is derived from. We allow arbitrary operations along the
4013 /// way, but the operands of an operation must either be constants or a value
4014 /// derived from a constant PHI. If this expression does not fit with these
4015 /// constraints, return null.
4016 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4017 // If this is not an instruction, or if this is an instruction outside of the
4018 // loop, it can't be derived from a loop PHI.
4019 Instruction *I = dyn_cast<Instruction>(V);
4020 if (I == 0 || !L->contains(I)) return 0;
4022 if (PHINode *PN = dyn_cast<PHINode>(I)) {
4023 if (L->getHeader() == I->getParent())
4024 return PN;
4025 else
4026 // We don't currently keep track of the control flow needed to evaluate
4027 // PHIs, so we cannot handle PHIs inside of loops.
4028 return 0;
4029 }
4031 // If we won't be able to constant fold this expression even if the operands
4032 // are constants, return early.
4033 if (!CanConstantFold(I)) return 0;
4035 // Otherwise, we can evaluate this instruction if all of its operands are
4036 // constant or derived from a PHI node themselves.
4037 PHINode *PHI = 0;
4038 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4039 if (!(isa<Constant>(I->getOperand(Op)) ||
4040 isa<GlobalValue>(I->getOperand(Op)))) {
4041 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4042 if (P == 0) return 0; // Not evolving from PHI
4043 if (PHI == 0)
4044 PHI = P;
4045 else if (PHI != P)
4046 return 0; // Evolving from multiple different PHIs.
4047 }
4049 // This is an expression evolving from a constant PHI!
4050 return PHI;
4051 }
4053 /// EvaluateExpression - Given an expression that passes the
4054 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4055 /// in the loop has the value PHIVal. If we can't fold this expression for some
4056 /// reason, return null.
4057 static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4058 const TargetData *TD) {
4059 if (isa<PHINode>(V)) return PHIVal;
4060 if (Constant *C = dyn_cast<Constant>(V)) return C;
4061 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
4062 Instruction *I = cast<Instruction>(V);
4064 std::vector<Constant*> Operands;
4065 Operands.resize(I->getNumOperands());
4067 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4068 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4069 if (Operands[i] == 0) return 0;
4070 }
4072 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4073 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4074 Operands[1], TD);
4075 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4076 &Operands[0], Operands.size(), TD);
4077 }
4079 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4080 /// in the header of its containing loop, we know the loop executes a
4081 /// constant number of times, and the PHI node is just a recurrence
4082 /// involving constants, fold it.
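/// For example, for a header PHI evolving as i = 0, 2, 4, ... (adding 2 on
/// each backedge), a backedge-taken count of 3 folds the exit value to 6.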
4083 Constant *
4084 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4085 const APInt &BEs,
4086 const Loop *L) {
4087 std::map<PHINode*, Constant*>::iterator I =
4088 ConstantEvolutionLoopExitValue.find(PN);
4089 if (I != ConstantEvolutionLoopExitValue.end())
4090 return I->second;
4092 if (BEs.ugt(MaxBruteForceIterations))
4093 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
4095 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4097 // Since the loop is canonicalized, the PHI node must have two entries. One
4098 // entry must be a constant (coming in from outside of the loop), and the
4099 // second must be derived from the same PHI.
4100 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4101 Constant *StartCST =
4102 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4103 if (StartCST == 0)
4104 return RetVal = 0; // Must be a constant.
4106 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4107 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4108 if (PN2 != PN)
4109 return RetVal = 0; // Not derived from same PHI.
4111 // Execute the loop symbolically to determine the exit value.
4112 if (BEs.getActiveBits() >= 32)
4113 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4115 unsigned NumIterations = BEs.getZExtValue(); // must be in range
4116 unsigned IterationNum = 0;
4117 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4118 if (IterationNum == NumIterations)
4119 return RetVal = PHIVal; // Got exit value!
4121 // Compute the value of the PHI node for the next iteration.
4122 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4123 if (NextPHI == PHIVal)
4124 return RetVal = NextPHI; // Stopped evolving!
4125 if (NextPHI == 0)
4126 return 0; // Couldn't evaluate!
4127 PHIVal = NextPHI;
4128 }
4129 }
4131 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4132 /// constant number of times (the condition evolves only from constants),
4133 /// try to evaluate a few iterations of the loop until the exit
4134 /// condition gets a value of ExitWhen (true or false). If we cannot
4135 /// evaluate the trip count of the loop, return getCouldNotCompute().
4136 const SCEV *
4137 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4138 Value *Cond,
4139 bool ExitWhen) {
4140 PHINode *PN = getConstantEvolvingPHI(Cond, L);
4141 if (PN == 0) return getCouldNotCompute();
4143 // Since the loop is canonicalized, the PHI node must have two entries. One
4144 // entry must be a constant (coming in from outside of the loop), and the
4145 // second must be derived from the same PHI.
4146 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4147 Constant *StartCST =
4148 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4149 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
4151 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4152 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4153 if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
4155 // Okay, we found a PHI node that defines the trip count of this loop. Execute
4156 // the loop symbolically to determine when the condition gets a value of
4157 // "ExitWhen".
4158 unsigned IterationNum = 0;
4159 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
4160 for (Constant *PHIVal = StartCST;
4161 IterationNum != MaxIterations; ++IterationNum) {
4162 ConstantInt *CondVal =
4163 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4165 // Couldn't symbolically evaluate.
4166 if (!CondVal) return getCouldNotCompute();
4168 if (CondVal->getValue() == uint64_t(ExitWhen)) {
4169 ++NumBruteForceTripCountsComputed;
4170 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4171 }
4173 // Compute the value of the PHI node for the next iteration.
4174 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4175 if (NextPHI == 0 || NextPHI == PHIVal)
4176 return getCouldNotCompute(); // Couldn't evaluate or not making progress...
4177 PHIVal = NextPHI;
4178 }
4180 // Too many iterations were needed to evaluate.
4181 return getCouldNotCompute();
4182 }
4184 /// getSCEVAtScope - Return a SCEV expression for the specified value
4185 /// at the specified scope in the program. The L value specifies a loop
4186 /// nest in which to evaluate the expression, where null means the
4187 /// top-level scope and a non-null loop means just inside that loop.
4188 ///
4189 /// This method can be used to compute the exit value for a variable defined
4190 /// in a loop by querying what the value will hold in the parent loop.
4192 /// In the case that a relevant loop exit value cannot be computed, the
4193 /// original value V is returned.
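/// For example, evaluating the AddRec {0,+,1}<L> at the scope of L's
/// parent loop, when L's backedge-taken count is the constant 9, yields
/// the exit value 9.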
4194 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4195 // Check to see if we've folded this expression at this loop before.
4196 std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4197 std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4198 Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4199 if (!Pair.second)
4200 return Pair.first->second ? Pair.first->second : V;
4202 // Otherwise compute it.
4203 const SCEV *C = computeSCEVAtScope(V, L);
4204 ValuesAtScopes[V][L] = C;
4205 return C;
4206 }
4208 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4209 if (isa<SCEVConstant>(V)) return V;
4211 // If this instruction is evolved from a constant-evolving PHI, compute the
4212 // exit value from the loop without using SCEVs.
4213 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4214 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4215 const Loop *LI = (*this->LI)[I->getParent()];
4216 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
4217 if (PHINode *PN = dyn_cast<PHINode>(I))
4218 if (PN->getParent() == LI->getHeader()) {
4219 // Okay, there is no closed form solution for the PHI node. Check
4220 // to see if the loop that contains it has a known backedge-taken
4221 // count. If so, we may be able to force computation of the exit
4222 // value.
4223 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4224 if (const SCEVConstant *BTCC =
4225 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4226 // Okay, we know how many times the containing loop executes. If
4227 // this is a constant evolving PHI node, get the final value at
4228 // the specified iteration number.
4229 Constant *RV = getConstantEvolutionLoopExitValue(PN,
4230 BTCC->getValue()->getValue(),
4231 LI);
4232 if (RV) return getSCEV(RV);
4233 }
4234 }
4236 // Okay, this is an expression that we cannot symbolically evaluate
4237 // into a SCEV. Check to see if it's possible to symbolically evaluate
4238 // the arguments into constants, and if so, try to constant propagate the
4239 // result. This is particularly useful for computing loop exit values.
4240 if (CanConstantFold(I)) {
4241 std::vector<Constant*> Operands;
4242 Operands.reserve(I->getNumOperands());
4243 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4244 Value *Op = I->getOperand(i);
4245 if (Constant *C = dyn_cast<Constant>(Op)) {
4246 Operands.push_back(C);
4247 } else {
4248 // If any of the operands is non-constant and if they are
4249 // non-integer and non-pointer, don't even try to analyze them
4250 // with scev techniques.
4251 if (!isSCEVable(Op->getType()))
4252 return V;
4254 const SCEV *OpV = getSCEVAtScope(Op, L);
4255 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
4256 Constant *C = SC->getValue();
4257 if (C->getType() != Op->getType())
4258 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4259 Op->getType(),
4260 false),
4261 C, Op->getType());
4262 Operands.push_back(C);
4263 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
4264 if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
4265 if (C->getType() != Op->getType())
4266 C =
4267 ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4268 Op->getType(),
4269 false),
4270 C, Op->getType());
4271 Operands.push_back(C);
4272 } else
4273 return V;
4274 } else
4275 return V;
4276 }
4277 }
4279 Constant *C = 0;
4281 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4282 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4283 Operands[0], Operands[1], TD);
4284 else
4285 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4286 &Operands[0], Operands.size(), TD);
4287 if (C)
4288 return getSCEV(C);
4289 }
4290 }
4292 // This is some other type of SCEVUnknown, just return it.
4293 return V;
4294 }
4296 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4297 // Avoid performing the look-up in the common case where the specified
4298 // expression has no loop-variant portions.
4299 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4300 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4301 if (OpAtScope != Comm->getOperand(i)) {
4302 // Okay, at least one of these operands is loop variant but might be
4303 // foldable. Build a new instance of the folded commutative expression.
4304 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4305 Comm->op_begin()+i);
4306 NewOps.push_back(OpAtScope);
4308 for (++i; i != e; ++i) {
4309 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4310 NewOps.push_back(OpAtScope);
4311 }
4312 if (isa<SCEVAddExpr>(Comm))
4313 return getAddExpr(NewOps);
4314 if (isa<SCEVMulExpr>(Comm))
4315 return getMulExpr(NewOps);
4316 if (isa<SCEVSMaxExpr>(Comm))
4317 return getSMaxExpr(NewOps);
4318 if (isa<SCEVUMaxExpr>(Comm))
4319 return getUMaxExpr(NewOps);
4320 llvm_unreachable("Unknown commutative SCEV type!");
4321 }
4322 }
4323 // If we got here, all operands are loop invariant.
4324 return Comm;
4325 }
4327 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4328 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4329 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4330 if (LHS == Div->getLHS() && RHS == Div->getRHS())
4331 return Div; // must be loop invariant
4332 return getUDivExpr(LHS, RHS);
4333 }
4335 // If this is a loop recurrence for a loop that does not contain L, then we
4336 // are dealing with the final value computed by the loop.
4337 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4338 if (!L || !AddRec->getLoop()->contains(L)) {
4339 // To evaluate this recurrence, we need to know how many times the AddRec
4340 // loop iterates. Compute this now.
4341 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4342 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4344 // Then, evaluate the AddRec.
4345 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4346 }
4348 return AddRec;
4349 }
4350 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4351 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4352 if (Op == Cast->getOperand())
4353 return Cast; // must be loop invariant
4354 return getZeroExtendExpr(Op, Cast->getType());
4355 }
4357 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4358 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4359 if (Op == Cast->getOperand())
4360 return Cast; // must be loop invariant
4361 return getSignExtendExpr(Op, Cast->getType());
4362 }
4364 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4365 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4366 if (Op == Cast->getOperand())
4367 return Cast; // must be loop invariant
4368 return getTruncateExpr(Op, Cast->getType());
4369 }
4371 llvm_unreachable("Unknown SCEV type!");
4372 }
4375 /// getSCEVAtScope - This is a convenience function which does
4376 /// getSCEVAtScope(getSCEV(V), L).
4377 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4378 return getSCEVAtScope(getSCEV(V), L);
4379 }
4381 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4382 /// following equation:
4383 ///
4384 /// A * X = B (mod N)
4385 ///
4386 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4387 /// A and B isn't important.
4389 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
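/// As a worked example with BW = 4 (so N = 16), A = 4, and B = 12:
/// D = gcd(4, 16) = 4, B is divisible by D, and the minimum unsigned
/// root is X = 3, since 4 * 3 = 12 (mod 16).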
4390 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4391 ScalarEvolution &SE) {
4392 uint32_t BW = A.getBitWidth();
4393 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4394 assert(A != 0 && "A must be non-zero.");
4396 // 1. D = gcd(A, N)
4397 //
4398 // The gcd of A and N may have only one prime factor: 2. The number of
4399 // trailing zeros in A is its multiplicity.
4400 uint32_t Mult2 = A.countTrailingZeros();
4401 // D = 2^Mult2
4403 // 2. Check if B is divisible by D.
4404 //
4405 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4406 // is not less than the multiplicity of this prime factor for D.
4407 if (B.countTrailingZeros() < Mult2)
4408 return SE.getCouldNotCompute();
4410 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4411 // modulo (N / D).
4412 //
4413 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
4414 // bit width during computations.
4415 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
4416 APInt Mod(BW + 1, 0);
4417 Mod.set(BW - Mult2); // Mod = N / D
4418 APInt I = AD.multiplicativeInverse(Mod);
4420 // 4. Compute the minimum unsigned root of the equation:
4421 // I * (B / D) mod (N / D)
4422 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4424 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4425 // bits.
4426 return SE.getConstant(Result.trunc(BW));
4427 }
4429 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4430 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
4431 /// might be the same) or two SCEVCouldNotCompute objects.
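/// Note that a chrec {L,+,M,+,N} evaluated at iteration X equals
/// L + M*X + N*(X*(X-1)/2), i.e. the polynomial
/// (N/2)*X^2 + (M - N/2)*X + L; this is where the A and B coefficients
/// computed below come from.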
4433 static std::pair<const SCEV *,const SCEV *>
4434 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4435 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4436 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4437 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4438 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4440 // We currently can only solve this if the coefficients are constants.
4441 if (!LC || !MC || !NC) {
4442 const SCEV *CNC = SE.getCouldNotCompute();
4443 return std::make_pair(CNC, CNC);
4444 }
4446 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4447 const APInt &L = LC->getValue()->getValue();
4448 const APInt &M = MC->getValue()->getValue();
4449 const APInt &N = NC->getValue()->getValue();
4450 APInt Two(BitWidth, 2);
4451 APInt Four(BitWidth, 4);
4453 {
4454 using namespace APIntOps;
4455 const APInt& C = L;
4456 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4457 // The B coefficient is M-N/2
4458 APInt B(M);
4459 B -= N.sdiv(Two);
4461 // The A coefficient is N/2
4462 APInt A(N.sdiv(Two));
4464 // Compute the B^2-4ac term.
4465 APInt SqrtTerm(B);
4466 SqrtTerm *= B;
4467 SqrtTerm -= Four * (A * C);
4469 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4470 // integer value or else APInt::sqrt() will assert.
4471 APInt SqrtVal(SqrtTerm.sqrt());
4473 // Compute the two solutions for the quadratic formula.
4474 // The divisions must be performed as signed divisions.
4475 APInt NegB(-B);
4476 APInt TwoA( A << 1 );
4477 if (TwoA.isMinValue()) {
4478 const SCEV *CNC = SE.getCouldNotCompute();
4479 return std::make_pair(CNC, CNC);
4480 }
4482 LLVMContext &Context = SE.getContext();
4484 ConstantInt *Solution1 =
4485 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4486 ConstantInt *Solution2 =
4487 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4489 return std::make_pair(SE.getConstant(Solution1),
4490 SE.getConstant(Solution2));
4491 } // end of block using APIntOps
4492 }
4494 /// HowFarToZero - Return the number of times a backedge comparing the specified
4495 /// value to zero will execute. If not computable, return CouldNotCompute.
4496 ScalarEvolution::BackedgeTakenInfo
4497 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4498 // If the value is a constant
4499 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4500 // If the value is already zero, the branch will execute zero times.
4501 if (C->getValue()->isZero()) return C;
4502 return getCouldNotCompute(); // Otherwise it will loop infinitely.
4503 }
4505 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4506 if (!AddRec || AddRec->getLoop() != L)
4507 return getCouldNotCompute();
4509 if (AddRec->isAffine()) {
4510 // If this is an affine expression, the execution count of this branch is
4511 // the minimum unsigned root of the following equation:
4512 //
4513 // Start + Step*N = 0 (mod 2^BW)
4514 //
4515 // equivalent to:
4516 //
4517 // Step*N = -Start (mod 2^BW)
4518 //
4519 // where BW is the common bit width of Start and Step.
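// For example, for the affine chrec {10,+,-1}, Step is -1, so the
// backedge executes N = Start = 10 times before the value reaches zero.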
4521 // Get the initial value for the loop.
4522 const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4523 L->getParentLoop());
4524 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4525 L->getParentLoop());
4527 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4528 // For now we handle only constant steps.
4530 // First, handle unitary steps.
4531 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
4532 return getNegativeSCEV(Start); // N = -Start (as unsigned)
4533 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
4534 return Start; // N = Start (as unsigned)
4536 // Then, try to solve the above equation provided that Start is constant.
4537 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4538 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4539 -StartC->getValue()->getValue(),
4540 *this);
4541 }
4542 } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4543 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4544 // the quadratic equation to solve it.
4545 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4546 *this);
4547 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4548 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4549 if (R1 && R2) {
4550 #if 0
4551 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4552 << " sol#2: " << *R2 << "\n";
4553 #endif
4554 // Pick the smallest positive root value.
4555 if (ConstantInt *CB =
4556 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4557 R1->getValue(), R2->getValue()))) {
4558 if (CB->getZExtValue() == false)
4559 std::swap(R1, R2); // R1 is the minimum root now.
4561 // We can only use this value if the chrec ends up with an exact zero
4562 // value at this index. When solving for "X*X != 5", for example, we
4563 // should not accept a root of 2.
4564 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4565 if (Val->isZero())
4566 return R1; // We found a quadratic root!
4567 }
4568 }
4569 }
4571 return getCouldNotCompute();
4572 }
4574 /// HowFarToNonZero - Return the number of times a backedge checking the
4575 /// specified value for nonzero will execute. If not computable, return
4576 /// CouldNotCompute.
4577 ScalarEvolution::BackedgeTakenInfo
4578 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4579 // Loops that look like: while (X == 0) are very strange indeed. We don't
4580 // handle them yet except for the trivial case. This could be expanded in the
4581 // future as needed.
4583 // If the value is a constant, check to see if it is known to be non-zero
4584 // already. If so, the backedge will execute zero times.
4585 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4586 if (!C->getValue()->isNullValue())
4587 return getConstant(C->getType(), 0);
4588 return getCouldNotCompute(); // Otherwise it will loop infinitely.
4589 }
4591 // We could implement others, but I really doubt anyone writes loops like
4592 // this, and if they did, they would already be constant folded.
4593 return getCouldNotCompute();
4594 }
4596 /// getLoopPredecessor - If the given loop's header has exactly one unique
4597 /// predecessor outside the loop, return it. Otherwise return null.
4598 /// This is less strict than the loop "preheader" concept, which requires
4599 /// the predecessor to have only one single successor.
4601 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4602 BasicBlock *Header = L->getHeader();
4603 BasicBlock *Pred = 0;
4604 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4605 PI != E; ++PI)
4606 if (!L->contains(*PI)) {
4607 if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4608 Pred = *PI;
4609 }
4610 return Pred;
4611 }
4613 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4614 /// (which may not be an immediate predecessor) which has exactly one
4615 /// successor from which BB is reachable, or null if no such block is
4616 /// found.
4618 std::pair<BasicBlock *, BasicBlock *>
4619 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4620 // If the block has a unique predecessor, then there is no path from the
4621 // predecessor to the block that does not go through the direct edge
4622 // from the predecessor to the block.
4623 if (BasicBlock *Pred = BB->getSinglePredecessor())
4624 return std::make_pair(Pred, BB);
4626 // A loop's header is defined to be a block that dominates the loop.
4627 // If the header has a unique predecessor outside the loop, it must be
4628 // a block that has exactly one successor that can reach the loop.
4629 if (Loop *L = LI->getLoopFor(BB))
4630 return std::make_pair(getLoopPredecessor(L), L->getHeader());
4632 return std::pair<BasicBlock *, BasicBlock *>();
/// HasSameValue - SCEV structural equivalence is usually sufficient for
/// testing whether two expressions are equal, however for the purposes of
/// looking for a condition guarding a loop, it can be useful to be a little
/// more general, since a front-end may have replicated the controlling
/// expression.
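/// (For example, a front-end may emit two identical "icmp slt" instructions,
/// one guarding loop entry and one in the exit test; they are distinct
/// SCEVUnknowns, yet they always compute the same value.)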
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
/// predicate Pred.  Return true iff any changes were made.
///
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS) {
  bool Changed = false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }
  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left.  Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }
  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
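  // (For example, "X >=u 1" becomes "X != 0", and "X >=u C" in general
  // becomes "X >u C-1"; comparisons made trivially true or false by the
  // constant jump straight to the trivially_* labels below.)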
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getValue()->getValue();
    switch (Pred) {
    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    case ICmpInst::ICMP_EQ:
    case ICmpInst::ICMP_NE:
      break;
    case ICmpInst::ICMP_UGE:
      if ((RA - 1).isMinValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMinValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_UGT;
      RHS = getConstant(RA - 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_ULE:
      if ((RA + 1).isMaxValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMinValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_ULT;
      RHS = getConstant(RA + 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_SGE:
      if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_SGT;
      RHS = getConstant(RA - 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_SLE:
      if ((RA + 1).isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_SLT;
      RHS = getConstant(RA + 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_UGT:
      if (RA.isMinValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA + 1).isMaxValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_ULT:
      if (RA.isMaxValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA - 1).isMinValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMinValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_SGT:
      if (RA.isMinSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA + 1).isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_SLT:
      if (RA.isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) goto trivially_false;
      break;
    }
  }
  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }
  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
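  // (For example, "A <=s B" becomes "A <s B+1" when B+1 provably cannot
  // overflow; failing that, "A-1 <s B" when A-1 cannot underflow.  The
  // nsw/nuw flags on the adds below record exactly that guarantee.)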
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  return Changed;

trivially_true:
  // Return 0 == 0.
  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
  Pred = ICmpInst::ICMP_EQ;
  return true;

trivially_false:
  // Return 0 != 0.
  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
  Pred = ICmpInst::ICMP_NE;
  return true;
}
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRange(S).getSignedMax().isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRange(S).getSignedMin().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRange(S).getSignedMin().isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If LHS or RHS is an addrec, check to see if the condition is true in
  // every iteration of the loop.
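  // (In effect an induction argument: if the condition holds for the start
  // value at loop entry, and holds for the post-increment value whenever the
  // backedge is taken, then it holds on every iteration.)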
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
    if (isLoopEntryGuardedByCond(
          AR->getLoop(), Pred, AR->getStart(), RHS) &&
        isLoopBackedgeGuardedByCond(
          AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
      return true;
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
    if (isLoopEntryGuardedByCond(
          AR->getLoop(), Pred, LHS, AR->getStart()) &&
        isLoopBackedgeGuardedByCond(
          AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
      return true;

  // Otherwise see what can be done with known constant ranges.
  return isKnownPredicateWithRanges(Pred, LHS, RHS);
}
bool
ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.
  switch (Pred) {
  default:
    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    break;
  case ICmpInst::ICMP_SGT:
    Pred = ICmpInst::ICMP_SLT;
    std::swap(LHS, RHS);
    // FALL THROUGH: a >s b is the same as b <s a.
  case ICmpInst::ICMP_SLT: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_SGE:
    Pred = ICmpInst::ICMP_SLE;
    std::swap(LHS, RHS);
    // FALL THROUGH
  case ICmpInst::ICMP_SLE: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGT:
    Pred = ICmpInst::ICMP_ULT;
    std::swap(LHS, RHS);
    // FALL THROUGH
  case ICmpInst::ICMP_ULT: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGE:
    Pred = ICmpInst::ICMP_ULE;
    std::swap(LHS, RHS);
    // FALL THROUGH
  case ICmpInst::ICMP_ULE: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_NE: {
    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
      return true;
    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
      return true;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    if (isKnownNonZero(Diff))
      return true;
    break;
  }
  case ICmpInst::ICMP_EQ:
    // The check at the top of the function catches the case where
    // the values are known to be equal.
    break;
  }
  return false;
}
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
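/// (For example, a latch ending in "br i1 (icmp slt %i.next, %n),
/// label %header, label %exit" guards every backedge with "%i.next <s %n".)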
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LoopContinuePredicate ||
      LoopContinuePredicate->isUnconditional())
    return false;

  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS.  This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(getLoopPredecessor(L), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  return false;
}
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(Value *CondValue,
                                    ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    bool Inverse) {
  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
  if (!ICI) return false;

  // Bail if the ICmp's operands' types are wider than the needed type
  // before attempting to call getSCEV on them.  This avoids infinite
  // recursion, since the analysis of widening casts can require loop
  // exit condition information for overflow checking, which would
  // lead back here.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(ICI->getOperand(0)->getType()))
    return false;

  // Now that we found a conditional branch that dominates the loop, check to
  // see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  // Balance the types.  The case where FoundLHS' type is wider than
  // LHS' type is checked for above.
  if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(Pred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}
/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y  -->  x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}
/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}
/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed.  Return
/// CouldNotCompute if an intermediate computation overflows.
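/// (That is, BECount = (End - Start + (Step - 1)) /u Step, a ceiling
/// division: e.g. Start=0, End=10, Step=3 gives (10+2)/3 = 4, matching the
/// four values 0, 3, 6, 9 taken below the limit.)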
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step,
                                        bool NoWrap) {
  assert(!isKnownNegative(Step) &&
         "This code doesn't handle negative strides yet!");

  const Type *Ty = Start->getType();
  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  if (!NoWrap) {
    // Check Add for unsigned overflow.
    // TODO: More sophisticated things could be done here.
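    // (The check works by redoing the addition in a type one bit wider:
    // zext(Diff + RoundUp) equals zext(Diff) + zext(RoundUp) exactly when
    // the original narrow addition did not wrap.)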
    const Type *WideTy = IntegerType::get(getContext(),
                                          getTypeSizeInBits(Ty) + 1);
    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
      return getCouldNotCompute();
  }

  return getUDivExpr(Add, Step);
}
/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle:  "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
                           AddRec->hasNoUnsignedWrap();

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
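      // (For example: an unsigned i8 IV with Step=10 and a limit that may be
      // as large as 250 is rejected here, since an IV value of 249 is below
      // the limit yet 249+10 wraps to 3, invalidating the count formula.)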
      const SCEV *One = getConstant(Step->getType(), 1);
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();
    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m.  So, we count the number of iterations in which
    // {n,+,s} < m is true.  Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as signed or unsigned due to the
    // possibility of overflow.
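    // (For example, with i8 values n=-126 and m=127, the true difference
    // m-n = 253 does not fit in i8; treating it as the signed value -3 would
    // yield a bogus trip count of zero.)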
    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times.  Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times.  In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopEntryGuardedByCond(L,
                                  isSigned ? ICmpInst::ICMP_SLT :
                                             ICmpInst::ICMP_ULT,
                                  getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());
    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same effect.
    // This allows the subsequent ceiling division of (N+(step-1))/step to
    // compute the correct value.
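    // (Concretely: getBECount forms (End-Start)+(Step-1) before dividing, so
    // clamping MaxEnd to Max-(Step-1) guarantees that this intermediate sum
    // cannot exceed the type's maximum value.)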
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getConstant(Step->getType(), 1));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}
/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count.  If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getConstant(SC->getType(), 0);
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }
  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getConstant(getType(), 0);

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
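    // (Worked example: {0,+,4} against the range [0,17).  End = 17-1 = 16,
    // so ExitVal = (16+4)/4 = 5; iteration 5 produces 20, the first value
    // outside the range, while iteration 4 produces 16, still inside.)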
    APInt One(BitWidth,1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.  To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
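    // (Replacing the start coefficient with -Range.getUpper() shifts the
    // chrec so that "value reaches Range.getUpper()" becomes "shifted value
    // reaches zero", which SolveQuadraticEquation knows how to handle.)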
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1 && R2) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                                                      R1->getValue(),
                                                      R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration should
        // not be in the range, but the previous one should be.  When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
          ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}
void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle.  Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value if it (indirectly) references itself.
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
}
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  DT = &getAnalysis<DominatorTree>();
  return false;
}
void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n";
  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}
void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting.  Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier.  This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
      OS << *I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!ExitValue->isLoopInvariant(L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}