//===- ScalarEvolution.cpp - Scalar Evolution Analysis -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
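//
// For example (illustrative only), given a loop
//
//   for (int i = 0; i != n; ++i)
//     ... use of 3*i + 7 ...
//
// the induction variable i is represented as the recurrence {0,+,1}<%loop>
// and the index expression 3*i + 7 as {7,+,3}<%loop>, where %loop names the
// loop's header block.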
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
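//
// For example (again illustrative, not an exhaustive list of rules): an add
// of a constant zero folds away, multiple constant operands fold into a
// single constant, and operands are sorted into a canonical order so that
// (%a + %b) and (%b + %a) produce the same SCEV object.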
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

142 void SCEV::print(raw_ostream &OS) const {
143 switch (static_cast<SCEVTypes>(getSCEVType())) {
145 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
148 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
149 const SCEV *Op = Trunc->getOperand();
150 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
151 << *Trunc->getType() << ")";
155 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
156 const SCEV *Op = ZExt->getOperand();
157 OS << "(zext " << *Op->getType() << " " << *Op << " to "
158 << *ZExt->getType() << ")";
162 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
163 const SCEV *Op = SExt->getOperand();
164 OS << "(sext " << *Op->getType() << " " << *Op << " to "
165 << *SExt->getType() << ")";
169 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
170 OS << "{" << *AR->getOperand(0);
171 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
172 OS << ",+," << *AR->getOperand(i);
174 if (AR->getNoWrapFlags(FlagNUW))
176 if (AR->getNoWrapFlags(FlagNSW))
178 if (AR->getNoWrapFlags(FlagNW) &&
179 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
181 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
189 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
190 const char *OpStr = nullptr;
191 switch (NAry->getSCEVType()) {
192 case scAddExpr: OpStr = " + "; break;
193 case scMulExpr: OpStr = " * "; break;
194 case scUMaxExpr: OpStr = " umax "; break;
195 case scSMaxExpr: OpStr = " smax "; break;
198 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
201 if (std::next(I) != E)
205 switch (NAry->getSCEVType()) {
208 if (NAry->getNoWrapFlags(FlagNUW))
210 if (NAry->getNoWrapFlags(FlagNSW))
216 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
217 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
221 const SCEVUnknown *U = cast<SCEVUnknown>(this);
223 if (U->isSizeOf(AllocTy)) {
224 OS << "sizeof(" << *AllocTy << ")";
227 if (U->isAlignOf(AllocTy)) {
228 OS << "alignof(" << *AllocTy << ")";
234 if (U->isOffsetOf(CTy, FieldNo)) {
235 OS << "offsetof(" << *CTy << ", ";
236 FieldNo->printAsOperand(OS, false);
241 // Otherwise just print it normally.
242 U->getValue()->printAsOperand(OS, false);
245 case scCouldNotCompute:
246 OS << "***COULDNOTCOMPUTE***";
249 llvm_unreachable("Unknown SCEV kind!");
252 Type *SCEV::getType() const {
253 switch (static_cast<SCEVTypes>(getSCEVType())) {
255 return cast<SCEVConstant>(this)->getType();
259 return cast<SCEVCastExpr>(this)->getType();
264 return cast<SCEVNAryExpr>(this)->getType();
266 return cast<SCEVAddExpr>(this)->getType();
268 return cast<SCEVUDivExpr>(this)->getType();
270 return cast<SCEVUnknown>(this)->getType();
271 case scCouldNotCompute:
272 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
274 llvm_unreachable("Unknown SCEV kind!");
277 bool SCEV::isZero() const {
278 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
279 return SC->getValue()->isZero();
283 bool SCEV::isOne() const {
284 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
285 return SC->getValue()->isOne();
289 bool SCEV::isAllOnesValue() const {
290 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
291 return SC->getValue()->isAllOnesValue();
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
298 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
299 if (!Mul) return false;
301 // If there is a constant factor, it will be first.
302 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
303 if (!SC) return false;
305 // Return true if the value is negative, this matches things like (-42 * V).
306 return SC->getValue()->getValue().isNegative();
309 SCEVCouldNotCompute::SCEVCouldNotCompute() :
310 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
312 bool SCEVCouldNotCompute::classof(const SCEV *S) {
313 return S->getSCEVType() == scCouldNotCompute;
316 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
318 ID.AddInteger(scConstant);
321 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
322 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
323 UniqueSCEVs.InsertNode(S, IP);
327 const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
328 return getConstant(ConstantInt::get(getContext(), Val));
332 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
333 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
334 return getConstant(ConstantInt::get(ITy, V, isSigned));
337 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
338 unsigned SCEVTy, const SCEV *op, Type *ty)
339 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
341 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
342 const SCEV *op, Type *ty)
343 : SCEVCastExpr(ID, scTruncate, op, ty) {
344 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
345 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
346 "Cannot truncate non-integer value!");
349 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
350 const SCEV *op, Type *ty)
351 : SCEVCastExpr(ID, scZeroExtend, op, ty) {
352 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
353 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
354 "Cannot zero extend non-integer value!");
357 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
358 const SCEV *op, Type *ty)
359 : SCEVCastExpr(ID, scSignExtend, op, ty) {
360 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
361 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
362 "Cannot sign extend non-integer value!");
365 void SCEVUnknown::deleted() {
366 // Clear this SCEVUnknown from various maps.
367 SE->forgetMemoizedResults(this);
369 // Remove this SCEVUnknown from the uniquing map.
370 SE->UniqueSCEVs.RemoveNode(this);
372 // Release the value.
376 void SCEVUnknown::allUsesReplacedWith(Value *New) {
377 // Clear this SCEVUnknown from various maps.
378 SE->forgetMemoizedResults(this);
380 // Remove this SCEVUnknown from the uniquing map.
381 SE->UniqueSCEVs.RemoveNode(this);
383 // Update this SCEVUnknown to point to the new value. This is needed
384 // because there may still be outstanding SCEVs which still point to
389 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
390 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
391 if (VCE->getOpcode() == Instruction::PtrToInt)
392 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
393 if (CE->getOpcode() == Instruction::GetElementPtr &&
394 CE->getOperand(0)->isNullValue() &&
395 CE->getNumOperands() == 2)
396 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
398 AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
406 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
407 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
408 if (VCE->getOpcode() == Instruction::PtrToInt)
409 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
410 if (CE->getOpcode() == Instruction::GetElementPtr &&
411 CE->getOperand(0)->isNullValue()) {
413 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
414 if (StructType *STy = dyn_cast<StructType>(Ty))
415 if (!STy->isPacked() &&
416 CE->getNumOperands() == 3 &&
417 CE->getOperand(1)->isNullValue()) {
418 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
420 STy->getNumElements() == 2 &&
421 STy->getElementType(0)->isIntegerTy(1)) {
422 AllocTy = STy->getElementType(1);
431 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
432 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
433 if (VCE->getOpcode() == Instruction::PtrToInt)
434 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
435 if (CE->getOpcode() == Instruction::GetElementPtr &&
436 CE->getNumOperands() == 3 &&
437 CE->getOperand(0)->isNullValue() &&
438 CE->getOperand(1)->isNullValue()) {
440 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
441 // Ignore vector types here so that ScalarEvolutionExpander doesn't
442 // emit getelementptrs that index into vectors.
443 if (Ty->isStructTy() || Ty->isArrayTy()) {
445 FieldNo = CE->getOperand(2);
453 //===----------------------------------------------------------------------===//
455 //===----------------------------------------------------------------------===//
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS.  This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
462 const LoopInfo *const LI;
464 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
// Return true if LHS is less complex than RHS; false otherwise.
467 bool operator()(const SCEV *LHS, const SCEV *RHS) const {
468 return compare(LHS, RHS) < 0;
471 // Return negative, zero, or positive, if LHS is less than, equal to, or
472 // greater than RHS, respectively. A three-way result allows recursive
473 // comparisons to be more efficient.
474 int compare(const SCEV *LHS, const SCEV *RHS) const {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
if (LHS == RHS)
  return 0;

// Primarily, sort the SCEVs by their getSCEVType().
unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
if (LType != RType)
  return (int)LType - (int)RType;
484 // Aside from the getSCEVType() ordering, the particular ordering
485 // isn't very important except that it's beneficial to be consistent,
486 // so that (a + b) and (b + a) don't end up as different expressions.
487 switch (static_cast<SCEVTypes>(LType)) {
489 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
490 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
492 // Sort SCEVUnknown values with some loose heuristics. TODO: This is
493 // not as complete as it could be.
494 const Value *LV = LU->getValue(), *RV = RU->getValue();
496 // Order pointer values after integer values. This helps SCEVExpander
498 bool LIsPointer = LV->getType()->isPointerTy(),
499 RIsPointer = RV->getType()->isPointerTy();
500 if (LIsPointer != RIsPointer)
501 return (int)LIsPointer - (int)RIsPointer;
503 // Compare getValueID values.
504 unsigned LID = LV->getValueID(),
505 RID = RV->getValueID();
507 return (int)LID - (int)RID;
509 // Sort arguments by their position.
510 if (const Argument *LA = dyn_cast<Argument>(LV)) {
511 const Argument *RA = cast<Argument>(RV);
512 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
513 return (int)LArgNo - (int)RArgNo;
516 // For instructions, compare their loop depth, and their operand
517 // count. This is pretty loose.
518 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
519 const Instruction *RInst = cast<Instruction>(RV);
521 // Compare loop depths.
522 const BasicBlock *LParent = LInst->getParent(),
523 *RParent = RInst->getParent();
524 if (LParent != RParent) {
525 unsigned LDepth = LI->getLoopDepth(LParent),
526 RDepth = LI->getLoopDepth(RParent);
527 if (LDepth != RDepth)
528 return (int)LDepth - (int)RDepth;
531 // Compare the number of operands.
532 unsigned LNumOps = LInst->getNumOperands(),
533 RNumOps = RInst->getNumOperands();
534 return (int)LNumOps - (int)RNumOps;
541 const SCEVConstant *LC = cast<SCEVConstant>(LHS);
542 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
544 // Compare constant values.
545 const APInt &LA = LC->getValue()->getValue();
546 const APInt &RA = RC->getValue()->getValue();
547 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
548 if (LBitWidth != RBitWidth)
549 return (int)LBitWidth - (int)RBitWidth;
550 return LA.ult(RA) ? -1 : 1;
554 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
555 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
557 // Compare addrec loop depths.
558 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
559 if (LLoop != RLoop) {
560 unsigned LDepth = LLoop->getLoopDepth(),
561 RDepth = RLoop->getLoopDepth();
562 if (LDepth != RDepth)
563 return (int)LDepth - (int)RDepth;
566 // Addrec complexity grows with operand count.
567 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
568 if (LNumOps != RNumOps)
569 return (int)LNumOps - (int)RNumOps;
571 // Lexicographically compare.
572 for (unsigned i = 0; i != LNumOps; ++i) {
573 long X = compare(LA->getOperand(i), RA->getOperand(i));
585 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
586 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
588 // Lexicographically compare n-ary expressions.
589 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
590 if (LNumOps != RNumOps)
591 return (int)LNumOps - (int)RNumOps;
593 for (unsigned i = 0; i != LNumOps; ++i) {
596 long X = compare(LC->getOperand(i), RC->getOperand(i));
600 return (int)LNumOps - (int)RNumOps;
604 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
605 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
607 // Lexicographically compare udiv expressions.
608 long X = compare(LC->getLHS(), RC->getLHS());
611 return compare(LC->getRHS(), RC->getRHS());
617 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
618 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
620 // Compare cast expressions by operand.
621 return compare(LC->getOperand(), RC->getOperand());
624 case scCouldNotCompute:
625 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
627 llvm_unreachable("Unknown SCEV kind!");
632 /// GroupByComplexity - Given a list of SCEV objects, order them by their
633 /// complexity, and group objects of the same complexity together by value.
634 /// When this routine is finished, we know that any duplicates in the vector are
635 /// consecutive and that complexity is monotonically increasing.
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// end up in memory.
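///
/// For instance (hypothetical operands), an input list (%b, %a, 2, %a) comes
/// out with the constant first and the duplicate values adjacent, e.g.
/// (2, %a, %a, %b); the exact relative order of distinct SCEVUnknowns is
/// decided by the heuristics in SCEVComplexityCompare.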
642 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
644 if (Ops.size() < 2) return; // Noop
645 if (Ops.size() == 2) {
646 // This is the common case, which also happens to be trivially simple.
648 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
649 if (SCEVComplexityCompare(LI)(RHS, LHS))
654 // Do the rough sort by complexity.
655 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
657 // Now that we are sorted by complexity, group elements of the same
658 // complexity. Note that this is, at worst, N^2, but the vector is likely to
659 // be extremely short in practice. Note that we take this approach because we
660 // do not want to depend on the addresses of the objects we are grouping.
661 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
662 const SCEV *S = Ops[i];
663 unsigned Complexity = S->getSCEVType();
665 // If there are any objects of the same complexity and same value as this
667 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
668 if (Ops[j] == S) { // Found a duplicate.
669 // Move it to immediately after i'th element.
670 std::swap(Ops[i+1], Ops[j]);
671 ++i; // no need to rescan it.
672 if (i == e-2) return; // Done!
679 struct FindSCEVSize {
681 FindSCEVSize() : Size(0) {}
683 bool follow(const SCEV *S) {
685 // Keep looking at all operands of S.
688 bool isDone() const {
694 // Returns the size of the SCEV S.
695 static inline int sizeOfSCEV(const SCEV *S) {
697 SCEVTraversal<FindSCEVSize> ST(F);
704 struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
// Computes the Quotient and Remainder of the division of Numerator by
// Denominator.
static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                   const SCEV *Denominator, const SCEV **Quotient,
                   const SCEV **Remainder) {
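  // As an illustrative sketch (with %n standing for an arbitrary value):
  // dividing the SCEV (4 * %n + 6) by the constant 2 yields
  // Quotient = (2 * %n + 3) and Remainder = 0.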
711 assert(Numerator && Denominator && "Uninitialized SCEV");
713 SCEVDivision D(SE, Numerator, Denominator);
715 // Check for the trivial case here to avoid having to check for it in the
717 if (Numerator == Denominator) {
723 if (Numerator->isZero()) {
729 // Split the Denominator when it is a product.
730 if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
732 *Quotient = Numerator;
733 for (const SCEV *Op : T->operands()) {
734 divide(SE, *Quotient, Op, &Q, &R);
737 // Bail out when the Numerator is not divisible by one of the terms of
741 *Remainder = Numerator;
750 *Quotient = D.Quotient;
751 *Remainder = D.Remainder;
754 // Except in the trivial case described above, we do not know how to divide
755 // Expr by Denominator for the following functions with empty implementation.
756 void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
757 void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
758 void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
759 void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
760 void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
761 void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
762 void visitUnknown(const SCEVUnknown *Numerator) {}
763 void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
765 void visitConstant(const SCEVConstant *Numerator) {
766 if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
767 APInt NumeratorVal = Numerator->getValue()->getValue();
768 APInt DenominatorVal = D->getValue()->getValue();
769 uint32_t NumeratorBW = NumeratorVal.getBitWidth();
770 uint32_t DenominatorBW = DenominatorVal.getBitWidth();
772 if (NumeratorBW > DenominatorBW)
773 DenominatorVal = DenominatorVal.sext(NumeratorBW);
774 else if (NumeratorBW < DenominatorBW)
775 NumeratorVal = NumeratorVal.sext(DenominatorBW);
777 APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
778 APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
779 APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
780 Quotient = SE.getConstant(QuotientVal);
781 Remainder = SE.getConstant(RemainderVal);
786 void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
787 const SCEV *StartQ, *StartR, *StepQ, *StepR;
788 assert(Numerator->isAffine() && "Numerator should be affine");
789 divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
790 divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
791 Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
792 Numerator->getNoWrapFlags());
793 Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
794 Numerator->getNoWrapFlags());
797 void visitAddExpr(const SCEVAddExpr *Numerator) {
798 SmallVector<const SCEV *, 2> Qs, Rs;
799 Type *Ty = Denominator->getType();
801 for (const SCEV *Op : Numerator->operands()) {
803 divide(SE, Op, Denominator, &Q, &R);
805 // Bail out if types do not match.
806 if (Ty != Q->getType() || Ty != R->getType()) {
808 Remainder = Numerator;
816 if (Qs.size() == 1) {
822 Quotient = SE.getAddExpr(Qs);
823 Remainder = SE.getAddExpr(Rs);
826 void visitMulExpr(const SCEVMulExpr *Numerator) {
827 SmallVector<const SCEV *, 2> Qs;
828 Type *Ty = Denominator->getType();
830 bool FoundDenominatorTerm = false;
831 for (const SCEV *Op : Numerator->operands()) {
832 // Bail out if types do not match.
833 if (Ty != Op->getType()) {
835 Remainder = Numerator;
839 if (FoundDenominatorTerm) {
844 // Check whether Denominator divides one of the product operands.
846 divide(SE, Op, Denominator, &Q, &R);
852 // Bail out if types do not match.
853 if (Ty != Q->getType()) {
855 Remainder = Numerator;
859 FoundDenominatorTerm = true;
863 if (FoundDenominatorTerm) {
868 Quotient = SE.getMulExpr(Qs);
872 if (!isa<SCEVUnknown>(Denominator)) {
874 Remainder = Numerator;
878 // The Remainder is obtained by replacing Denominator by 0 in Numerator.
879 ValueToValueMap RewriteMap;
880 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
881 cast<SCEVConstant>(Zero)->getValue();
882 Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
884 if (Remainder->isZero()) {
885 // The Quotient is obtained by replacing Denominator by 1 in Numerator.
886 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
887 cast<SCEVConstant>(One)->getValue();
889 SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
893 // Quotient is (Numerator - Remainder) divided by Denominator.
895 const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
896 if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
897 // This SCEV does not seem to simplify: fail the division here.
899 Remainder = Numerator;
902 divide(SE, Diff, Denominator, &Q, &R);
904 "(Numerator - Remainder) should evenly divide Denominator");
909 SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
910 const SCEV *Denominator)
911 : SE(S), Denominator(Denominator) {
912 Zero = SE.getConstant(Denominator->getType(), 0);
913 One = SE.getConstant(Denominator->getType(), 1);
915 // By default, we don't know how to divide Expr by Denominator.
916 // Providing the default here simplifies the rest of the code.
918 Remainder = Numerator;
922 const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
927 //===----------------------------------------------------------------------===//
928 // Simple SCEV method implementations
929 //===----------------------------------------------------------------------===//
/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);
940 // We are using the following formula for BC(It, K):
942 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
944 // Suppose, W is the bitwidth of the return value. We must be prepared for
945 // overflow. Hence, we must assure that the result of our computation is
946 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
947 // safe in modular arithmetic.
949 // However, this code doesn't use exactly that formula; the formula it uses
950 // is something like the following, where T is the number of factors of 2 in
951 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
954 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
956 // This formula is trivially equivalent to the previous formula. However,
957 // this formula can be implemented much more efficiently. The trick is that
958 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
959 // arithmetic. To do exact division in modular arithmetic, all we have
960 // to do is multiply by the inverse. Therefore, this step can be done at
963 // The next issue is how to safely do the division by 2^T. The way this
964 // is done is by doing the multiplication step at a width of at least W + T
965 // bits. This way, the bottom W+T bits of the product are accurate. Then,
966 // when we perform the division by 2^T (which is equivalent to a right shift
967 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
968 // truncated out after the division by 2^T.
970 // In comparison to just directly using the first formula, this technique
971 // is much more efficient; using the first formula requires W * K bits,
// but this formula requires less than W + K bits.  Also, the first formula
// requires a division step, whereas this formula only requires multiplies and
// shifts.
975 // It doesn't matter whether the subtraction step is done in the calculation
976 // width or the input iteration count's width; if the subtraction overflows,
977 // the result must be zero anyway. We prefer here to do it in the width of
978 // the induction variable because it helps a lot for certain cases; CodeGen
979 // isn't smart enough to ignore the overflow, which leads to much less
980 // efficient code if the width of the subtraction is wider than the native
983 // (It's possible to not widen at all by pulling out factors of 2 before
984 // the multiplication; for example, K=2 can be calculated as
985 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
986 // extra arithmetic, so it's not an obvious win, and it gets
987 // much more complicated for K > 3.)
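//
// As a concrete (illustrative) instance of the scheme above, take K = 4:
// K! = 24 = 2^3 * 3, so T = 3 and K! / 2^T = 3.  The product
// It*(It-1)*(It-2)*(It-3) is formed at W+3 bits, divided by 2^3, truncated
// back to W bits, and finally multiplied by the multiplicative inverse of 3
// modulo 2^W.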
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();
994 unsigned W = SE.getTypeSizeInBits(ResultTy);
996 // Calculate K! / 2^T and T; we divide out the factors of two before
997 // multiplying for calculating K! / 2^T to avoid overflow.
998 // Other overflow doesn't matter because we only care about the bottom
999 // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }
1010 // We need at least W + T bits for the multiplication step
1011 unsigned CalculationBits = W + T;
1013 // Calculate 2^T, at width T+W.
1014 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
1016 // Calculate the multiplicative inverse of K! / 2^T;
1017 // this multiplication factor will perform the exact division by
1019 APInt Mod = APInt::getSignedMinValue(W+1);
1020 APInt MultiplyFactor = OddFactorial.zext(W+1);
1021 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
1022 MultiplyFactor = MultiplyFactor.trunc(W);
1024 // Calculate the product, at width T+W
1025 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
1027 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
1028 for (unsigned i = 1; i != K; ++i) {
1029 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
1030 Dividend = SE.getMulExpr(Dividend,
1031 SE.getTruncateOrZeroExtend(S, CalculationTy));
1035 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
1037 // Truncate the result, and divide by K! / 2^T.
1039 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
1040 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
1043 /// evaluateAtIteration - Return the value of this chain of recurrences at
1044 /// the specified iteration number. We can evaluate this recurrence by
1045 /// multiplying each element in the chain by the binomial coefficient
1046 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
1048 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
1050 /// where BC(It, k) stands for binomial coefficient.
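///
/// For instance (illustrative), with BC(It, 1) = It and BC(It, 2) = It*(It-1)/2,
/// the affine recurrence {5,+,3} evaluates at iteration It = 4 to 5 + 3*4 = 17,
/// and {A,+,B,+,C} evaluates to A + B*It + C*It*(It-1)/2.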
1052 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
1053 ScalarEvolution &SE) const {
1054 const SCEV *Result = getStart();
1055 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1056 // The computation is correct in the face of overflow provided that the
1057 // multiplication is performed _after_ the evaluation of the binomial
1059 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
1060 if (isa<SCEVCouldNotCompute>(Coeff))
1063 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
1068 //===----------------------------------------------------------------------===//
1069 // SCEV Expression folder implementations
1070 //===----------------------------------------------------------------------===//
1072 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
1074 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1075 "This is not a truncating conversion!");
1076 assert(isSCEVable(Ty) &&
1077 "This is not a conversion to a SCEVable type!");
1078 Ty = getEffectiveSCEVType(Ty);
1080 FoldingSetNodeID ID;
1081 ID.AddInteger(scTruncate);
1085 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1087 // Fold if the operand is constant.
1088 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1090 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
1092 // trunc(trunc(x)) --> trunc(x)
1093 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
1094 return getTruncateExpr(ST->getOperand(), Ty);
1096 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
1097 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1098 return getTruncateOrSignExtend(SS->getOperand(), Ty);
1100 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
1101 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1102 return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
1104 // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
1105 // eliminate all the truncates.
1106 if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
1107 SmallVector<const SCEV *, 4> Operands;
1108 bool hasTrunc = false;
1109 for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
1110 const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
1111 hasTrunc = isa<SCEVTruncateExpr>(S);
1112 Operands.push_back(S);
1115 return getAddExpr(Operands);
1116 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1119 // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
1120 // eliminate all the truncates.
1121 if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
1122 SmallVector<const SCEV *, 4> Operands;
1123 bool hasTrunc = false;
1124 for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
1125 const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
1126 hasTrunc = isa<SCEVTruncateExpr>(S);
1127 Operands.push_back(S);
1130 return getMulExpr(Operands);
1131 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1134 // If the input value is a chrec scev, truncate the chrec's operands.
1135 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1136 SmallVector<const SCEV *, 4> Operands;
1137 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1138 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
1139 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
1142 // The cast wasn't folded; create an explicit cast node. We can reuse
1143 // the existing insert position since if we get here, we won't have
1144 // made any changes which would invalidate it.
1145 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
1147 UniqueSCEVs.InsertNode(S, IP);
1151 // Get the limit of a recurrence such that incrementing by Step cannot cause
1152 // signed overflow as long as the value of the recurrence within the
1153 // loop does not exceed this limit before incrementing.
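//
// For example (a sketch, assuming an i8 recurrence whose step is known to lie
// in [1, 3]): the returned limit is SINT_MIN - 3, which wraps to 125, with
// predicate ICMP_SLT; any recurrence value V with V <s 125 satisfies
// V + Step <= 127, so the increment cannot overflow.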
1154 static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
1155 ICmpInst::Predicate *Pred,
1156 ScalarEvolution *SE) {
1157 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1158 if (SE->isKnownPositive(Step)) {
1159 *Pred = ICmpInst::ICMP_SLT;
1160 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1161 SE->getSignedRange(Step).getSignedMax());
1163 if (SE->isKnownNegative(Step)) {
1164 *Pred = ICmpInst::ICMP_SGT;
1165 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1166 SE->getSignedRange(Step).getSignedMin());
1171 // Get the limit of a recurrence such that incrementing by Step cannot cause
1172 // unsigned overflow as long as the value of the recurrence within the loop does
1173 // not exceed this limit before incrementing.
1174 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
1175 ICmpInst::Predicate *Pred,
1176 ScalarEvolution *SE) {
1177 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1178 *Pred = ICmpInst::ICMP_ULT;
1180 return SE->getConstant(APInt::getMinValue(BitWidth) -
1181 SE->getUnsignedRange(Step).getUnsignedMax());
1186 struct ExtendOpTraitsBase {
1187 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
1190 // Used to make code generic over signed and unsigned overflow.
1191 template <typename ExtendOp> struct ExtendOpTraits {
1194 // static const SCEV::NoWrapFlags WrapType;
1196 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1198 // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1199 // ICmpInst::Predicate *Pred,
1200 // ScalarEvolution *SE);
1204 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1205 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1207 static const GetExtendExprTy GetExtendExpr;
1209 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1210 ICmpInst::Predicate *Pred,
1211 ScalarEvolution *SE) {
1212 return getSignedOverflowLimitForStep(Step, Pred, SE);
1216 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1217 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
1220 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1221 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1223 static const GetExtendExprTy GetExtendExpr;
1225 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1226 ICmpInst::Predicate *Pred,
1227 ScalarEvolution *SE) {
1228 return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1232 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1233 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
1243 template <typename ExtendOpTy>
1244 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1245 ScalarEvolution *SE) {
1246 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1247 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1249 const Loop *L = AR->getLoop();
1250 const SCEV *Start = AR->getStart();
1251 const SCEV *Step = AR->getStepRecurrence(*SE);
1253 // Check for a simple looking step prior to loop entry.
1254 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1258 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1259 // subtraction is expensive. For this purpose, perform a quick and dirty
1260 // difference, by checking for Step in the operand list.
1261 SmallVector<const SCEV *, 4> DiffOps;
1262 for (const SCEV *Op : SA->operands())
1264 DiffOps.push_back(Op);
1266 if (DiffOps.size() == SA->getNumOperands())
1269 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1272 // 1. NSW/NUW flags on the step increment.
1273 const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
1274 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1275 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1277 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1278 // "S+X does not sign/unsign-overflow".
1281 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1282 if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1283 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1286 // 2. Direct overflow check on the step operation's expression.
1287 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1288 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1289 const SCEV *OperandExtendedStart =
1290 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
1291 (SE->*GetExtendExpr)(Step, WideTy));
1292 if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
1293 if (PreAR && AR->getNoWrapFlags(WrapType)) {
1294 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1295 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1296 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1297 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
1302 // 3. Loop precondition.
1303 ICmpInst::Predicate Pred;
1304 const SCEV *OverflowLimit =
1305 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1307 if (OverflowLimit &&
1308 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
1314 // Get the normalized zero or sign extended expression for this AddRec's Start.
1315 template <typename ExtendOpTy>
1316 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1317 ScalarEvolution *SE) {
1318 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1320 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
1322 return (SE->*GetExtendExpr)(AR->getStart(), Ty);
1324 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
1325 (SE->*GetExtendExpr)(PreStart, Ty));
1328 // Try to prove away overflow by looking at "nearby" add recurrences. A
1329 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1330 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1334 // {S,+,X} == {S-T,+,X} + T
1335 // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1337 // If ({S-T,+,X} + T) does not overflow ... (1)
1339 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1341 // If {S-T,+,X} does not overflow ... (2)
1343 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1344 // == {Ext(S-T)+Ext(T),+,Ext(X)}
1346 // If (S-T)+T does not overflow ... (3)
1348 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1349 // == {Ext(S),+,Ext(X)} == LHS
1351 // Thus, if (1), (2) and (3) are true for some T, then
1352 // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1354 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1355 // does not overflow" restricted to the 0th iteration. Therefore we only need
1356 // to check for (1) and (2).
1358 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1359 // is `Delta` (defined below).
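//
// Concretely (mirroring the motivating example above): for Start = 1, Step = 4
// and T = 1, if the pre-existing recurrence {0,+,4} is already known <nuw>
// (condition (2)), and {0,+,4} is known ult -1 so that adding 1 back cannot
// wrap (condition (1)), then zext({1,+,4}) == {zext(1),+,zext(4)} and {1,+,4}
// is itself <nuw>.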
1361 template <typename ExtendOpTy>
1362 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1365 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1367 // We restrict `Start` to a constant to prevent SCEV from spending too much
1368 // time here. It is correct (but more expensive) to continue with a
1369 // non-constant `Start` and do a general SCEV subtraction to compute
1370 // `PreStart` below.
1372 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1376 APInt StartAI = StartC->getValue()->getValue();
1378 for (unsigned Delta : {-2, -1, 1, 2}) {
1379 const SCEV *PreStart = getConstant(StartAI - Delta);
1381 // Give up if we don't already have the add recurrence we need because
1382 // actually constructing an add recurrence is relatively expensive.
1383 const SCEVAddRecExpr *PreAR = [&]() {
1384 FoldingSetNodeID ID;
1385 ID.AddInteger(scAddRecExpr);
1386 ID.AddPointer(PreStart);
1387 ID.AddPointer(Step);
1390 return static_cast<SCEVAddRecExpr *>(
1391 UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1394 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1395 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1396 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1397 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1398 DeltaS, &Pred, this);
1399 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1407 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
1409 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1410 "This is not an extending conversion!");
1411 assert(isSCEVable(Ty) &&
1412 "This is not a conversion to a SCEVable type!");
1413 Ty = getEffectiveSCEVType(Ty);
1415 // Fold if the operand is constant.
1416 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1418 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1420 // zext(zext(x)) --> zext(x)
1421 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1422 return getZeroExtendExpr(SZ->getOperand(), Ty);
1424 // Before doing any expensive analysis, check to see if we've already
1425 // computed a SCEV for this Op and Ty.
1426 FoldingSetNodeID ID;
1427 ID.AddInteger(scZeroExtend);
1431 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1433 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1434 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1435 // It's possible the bits taken off by the truncate were all zero bits. If
1436 // so, we should be able to simplify this further.
1437 const SCEV *X = ST->getOperand();
1438 ConstantRange CR = getUnsignedRange(X);
1439 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1440 unsigned NewBits = getTypeSizeInBits(Ty);
1441 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1442 CR.zextOrTrunc(NewBits)))
1443 return getTruncateOrZeroExtend(X, Ty);
1446 // If the input value is a chrec scev, and we can prove that the value
1447 // did not overflow the old, smaller, value, we can zero extend all of the
1448 // operands (often constants). This allows analysis of something like
1449 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1450 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1451 if (AR->isAffine()) {
1452 const SCEV *Start = AR->getStart();
1453 const SCEV *Step = AR->getStepRecurrence(*this);
1454 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1455 const Loop *L = AR->getLoop();
1457 // If we have special knowledge that this addrec won't overflow,
1458 // we don't need to do any further analysis.
1459 if (AR->getNoWrapFlags(SCEV::FlagNUW))
1460 return getAddRecExpr(
1461 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1462 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1464 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1465 // Note that this serves two purposes: It filters out loops that are
1466 // simply not analyzable, and it covers the case where this code is
1467 // being called from within backedge-taken count analysis, such that
1468 // attempting to ask for the backedge-taken count would likely result
1469 // in infinite recursion. In the later case, the analysis code will
1470 // cope with a conservative value, and it will take care to purge
1471 // that value once it has finished.
1472 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1473 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1474 // Manually compute the final value for AR, checking for
1477 // Check whether the backedge-taken count can be losslessly casted to
1478 // the addrec's type. The count is always unsigned.
1479 const SCEV *CastedMaxBECount =
1480 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1481 const SCEV *RecastedMaxBECount =
1482 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1483 if (MaxBECount == RecastedMaxBECount) {
1484 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1485 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1486 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
1487 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
1488 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
1489 const SCEV *WideMaxBECount =
1490 getZeroExtendExpr(CastedMaxBECount, WideTy);
1491 const SCEV *OperandExtendedAdd =
1492 getAddExpr(WideStart,
1493 getMulExpr(WideMaxBECount,
1494 getZeroExtendExpr(Step, WideTy)));
1495 if (ZAdd == OperandExtendedAdd) {
1496 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1497 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1498 // Return the expression with the addrec on the outside.
1499 return getAddRecExpr(
1500 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1501 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1503 // Similar to above, only this time treat the step value as signed.
1504 // This covers loops that count down.
1505 OperandExtendedAdd =
1506 getAddExpr(WideStart,
1507 getMulExpr(WideMaxBECount,
1508 getSignExtendExpr(Step, WideTy)));
1509 if (ZAdd == OperandExtendedAdd) {
1510 // Cache knowledge of AR NW, which is propagated to this AddRec.
1511 // Negative step causes unsigned wrap, but it still can't self-wrap.
1512 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1513 // Return the expression with the addrec on the outside.
1514 return getAddRecExpr(
1515 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1516 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1520 // If the backedge is guarded by a comparison with the pre-inc value
1521 // the addrec is safe. Also, if the entry is guarded by a comparison
1522 // with the start value and the backedge is guarded by a comparison
1523 // with the post-inc value, the addrec is safe.
1524 if (isKnownPositive(Step)) {
1525 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1526 getUnsignedRange(Step).getUnsignedMax());
1527 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1528 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
1529 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
1530 AR->getPostIncExpr(*this), N))) {
1531 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1532 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1533 // Return the expression with the addrec on the outside.
1534 return getAddRecExpr(
1535 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1536 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1538 } else if (isKnownNegative(Step)) {
1539 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1540 getSignedRange(Step).getSignedMin());
1541 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1542 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
1543 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
1544 AR->getPostIncExpr(*this), N))) {
1545 // Cache knowledge of AR NW, which is propagated to this AddRec.
1546 // Negative step causes unsigned wrap, but it still can't self-wrap.
1547 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1548 // Return the expression with the addrec on the outside.
1549 return getAddRecExpr(
1550 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1551 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1556 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1557 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1558 return getAddRecExpr(
1559 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1560 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1564 // The cast wasn't folded; create an explicit cast node.
1565 // Recompute the insert position, as it may have been invalidated.
1566 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1567 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1569 UniqueSCEVs.InsertNode(S, IP);
1573 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1575 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1576 "This is not an extending conversion!");
1577 assert(isSCEVable(Ty) &&
1578 "This is not a conversion to a SCEVable type!");
1579 Ty = getEffectiveSCEVType(Ty);
1581 // Fold if the operand is constant.
1582 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1584 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1586 // sext(sext(x)) --> sext(x)
1587 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1588 return getSignExtendExpr(SS->getOperand(), Ty);
1590 // sext(zext(x)) --> zext(x)
1591 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1592 return getZeroExtendExpr(SZ->getOperand(), Ty);
1594 // Before doing any expensive analysis, check to see if we've already
1595 // computed a SCEV for this Op and Ty.
1596 FoldingSetNodeID ID;
1597 ID.AddInteger(scSignExtend);
1601 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1603 // If the input value is provably positive, build a zext instead.
1604 if (isKnownNonNegative(Op))
1605 return getZeroExtendExpr(Op, Ty);
1607 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1608 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1609 // It's possible the bits taken off by the truncate were all sign bits. If
1610 // so, we should be able to simplify this further.
1611 const SCEV *X = ST->getOperand();
1612 ConstantRange CR = getSignedRange(X);
1613 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1614 unsigned NewBits = getTypeSizeInBits(Ty);
1615 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1616 CR.sextOrTrunc(NewBits)))
1617 return getTruncateOrSignExtend(X, Ty);
1620 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
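  // (Illustrative instance: with C1 = 1 and C2 = 4, the product 4 * x has its
  // two low bits clear, so adding 1 cannot carry into the higher bits; the
  // sign bit of 1 + 4 * x therefore matches that of 4 * x, which is what makes
  // the split legal.)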
1621 if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
1622 if (SA->getNumOperands() == 2) {
1623 auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
1624 auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
1626 if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
1627 const APInt &C1 = SC1->getValue()->getValue();
1628 const APInt &C2 = SC2->getValue()->getValue();
1629 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
1630 C2.ugt(C1) && C2.isPowerOf2())
1631 return getAddExpr(getSignExtendExpr(SC1, Ty),
1632 getSignExtendExpr(SMul, Ty));
1637 // If the input value is a chrec scev, and we can prove that the value
1638 // did not overflow the old, smaller, value, we can sign extend all of the
1639 // operands (often constants). This allows analysis of something like
1640 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1641 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1642 if (AR->isAffine()) {
1643 const SCEV *Start = AR->getStart();
1644 const SCEV *Step = AR->getStepRecurrence(*this);
1645 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1646 const Loop *L = AR->getLoop();
1648 // If we have special knowledge that this addrec won't overflow,
1649 // we don't need to do any further analysis.
1650 if (AR->getNoWrapFlags(SCEV::FlagNSW))
1651 return getAddRecExpr(
1652 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1653 getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
1655 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1656 // Note that this serves two purposes: It filters out loops that are
1657 // simply not analyzable, and it covers the case where this code is
1658 // being called from within backedge-taken count analysis, such that
1659 // attempting to ask for the backedge-taken count would likely result
1660 // in infinite recursion. In the later case, the analysis code will
1661 // cope with a conservative value, and it will take care to purge
1662 // that value once it has finished.
1663 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1664 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1665 // Manually compute the final value for AR, checking for
1668 // Check whether the backedge-taken count can be losslessly casted to
1669 // the addrec's type. The count is always unsigned.
1670 const SCEV *CastedMaxBECount =
1671 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1672 const SCEV *RecastedMaxBECount =
1673 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1674 if (MaxBECount == RecastedMaxBECount) {
1675 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1676 // Check whether Start+Step*MaxBECount has no signed overflow.
1677 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1678 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1679 const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1680 const SCEV *WideMaxBECount =
1681 getZeroExtendExpr(CastedMaxBECount, WideTy);
1682 const SCEV *OperandExtendedAdd =
1683 getAddExpr(WideStart,
1684 getMulExpr(WideMaxBECount,
1685 getSignExtendExpr(Step, WideTy)));
1686 if (SAdd == OperandExtendedAdd) {
1687 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1688 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1689 // Return the expression with the addrec on the outside.
1690 return getAddRecExpr(
1691 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1692 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1694 // Similar to above, only this time treat the step value as unsigned.
1695 // This covers loops that count up with an unsigned step.
1696 OperandExtendedAdd =
1697 getAddExpr(WideStart,
1698 getMulExpr(WideMaxBECount,
1699 getZeroExtendExpr(Step, WideTy)));
1700 if (SAdd == OperandExtendedAdd) {
1701 // If AR wraps around then
1703 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
1704 // => SAdd != OperandExtendedAdd
1706 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
1707 // (SAdd == OperandExtendedAdd => AR is NW)
1709 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1711 // Return the expression with the addrec on the outside.
1712 return getAddRecExpr(
1713 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1714 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1718 // If the backedge is guarded by a comparison with the pre-inc value
1719 // the addrec is safe. Also, if the entry is guarded by a comparison
1720 // with the start value and the backedge is guarded by a comparison
1721 // with the post-inc value, the addrec is safe.
1722 ICmpInst::Predicate Pred;
1723 const SCEV *OverflowLimit =
1724 getSignedOverflowLimitForStep(Step, &Pred, this);
1725 if (OverflowLimit &&
1726 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1727 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1728 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1730 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1731 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1732 return getAddRecExpr(
1733 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1734 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1737 // If Start and Step are constants, check if we can apply this transformation:
1739 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
1740 auto SC1 = dyn_cast<SCEVConstant>(Start);
1741 auto SC2 = dyn_cast<SCEVConstant>(Step);
1742 if (SC1 && SC2) {
1743 const APInt &C1 = SC1->getValue()->getValue();
1744 const APInt &C2 = SC2->getValue()->getValue();
1745 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1746 C2.isPowerOf2()) {
1747 Start = getSignExtendExpr(Start, Ty);
1748 const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
1749 L, AR->getNoWrapFlags());
1750 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
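// Illustrative example (not from the original source): with C1 = 1 and
// C2 = 2 (both positive, C2 > C1, C2 a power of two), sext(i8 {1,+,2}<L>)
// becomes 1 + sext(i8 {0,+,2}<L>).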
1754 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1755 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1756 return getAddRecExpr(
1757 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1758 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1762 // The cast wasn't folded; create an explicit cast node.
1763 // Recompute the insert position, as it may have been invalidated.
1764 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1765 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1766 Op, Ty);
1767 UniqueSCEVs.InsertNode(S, IP);
1768 return S;
1771 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1772 /// unspecified bits out to the given type.
1774 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1776 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1777 "This is not an extending conversion!");
1778 assert(isSCEVable(Ty) &&
1779 "This is not a conversion to a SCEVable type!");
1780 Ty = getEffectiveSCEVType(Ty);
1782 // Sign-extend negative constants.
1783 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1784 if (SC->getValue()->getValue().isNegative())
1785 return getSignExtendExpr(Op, Ty);
1787 // Peel off a truncate cast.
1788 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1789 const SCEV *NewOp = T->getOperand();
1790 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1791 return getAnyExtendExpr(NewOp, Ty);
1792 return getTruncateOrNoop(NewOp, Ty);
1795 // Next try a zext cast. If the cast is folded, use it.
1796 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1797 if (!isa<SCEVZeroExtendExpr>(ZExt))
1798 return ZExt;
1800 // Next try a sext cast. If the cast is folded, use it.
1801 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1802 if (!isa<SCEVSignExtendExpr>(SExt))
1803 return SExt;
1805 // Force the cast to be folded into the operands of an addrec.
1806 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1807 SmallVector<const SCEV *, 4> Ops;
1808 for (const SCEV *Op : AR->operands())
1809 Ops.push_back(getAnyExtendExpr(Op, Ty));
1810 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1813 // If the expression is obviously signed, use the sext cast value.
1814 if (isa<SCEVSMaxExpr>(Op))
1815 return SExt;
1817 // Absent any other information, use the zext cast value.
1818 return ZExt;
1821 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1822 /// a list of operands to be added under the given scale, update the given
1823 /// map. This is a helper function for getAddExpr. As an example of
1824 /// what it does, given a sequence of operands that would form an add
1825 /// expression like this:
1827 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1829 /// where A and B are constants, update the map with these values:
1831 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1833 /// and add 13 + A*B*29 to AccumulatedConstant.
1834 /// This will allow getAddExpr to produce this:
1836 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1838 /// This form often exposes folding opportunities that are hidden in
1839 /// the original operand list.
1841 /// Return true iff it appears that any interesting folding opportunities
1842 /// may be exposed. This helps getAddExpr short-circuit extra work in
1843 /// the common case where no interesting opportunities are present, and
1844 /// is also used as a check to avoid infinite recursion.
1847 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1848 SmallVectorImpl<const SCEV *> &NewOps,
1849 APInt &AccumulatedConstant,
1850 const SCEV *const *Ops, size_t NumOperands,
1852 ScalarEvolution &SE) {
1853 bool Interesting = false;
1855 // Iterate over the add operands. They are sorted, with constants first.
1857 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1859 // Pull a buried constant out to the outside.
1860 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1861 Interesting = true;
1862 AccumulatedConstant += Scale * C->getValue()->getValue();
1863 ++i;
1865 // Next comes everything else. We're especially interested in multiplies
1866 // here, but they're in the middle, so just visit the rest with one loop.
1867 for (; i != NumOperands; ++i) {
1868 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1869 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1871 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1872 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1873 // A multiplication of a constant with another add; recurse.
1874 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1876 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1877 Add->op_begin(), Add->getNumOperands(),
1880 // A multiplication of a constant with some other value. Update
1882 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1883 const SCEV *Key = SE.getMulExpr(MulOps);
1884 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1885 M.insert(std::make_pair(Key, NewScale));
1887 NewOps.push_back(Pair.first->first);
1889 Pair.first->second += NewScale;
1890 // The map already had an entry for this value, which may indicate
1891 // a folding opportunity.
1896 // An ordinary operand. Update the map.
1897 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1898 M.insert(std::make_pair(Ops[i], Scale));
1900 NewOps.push_back(Pair.first->first);
1902 Pair.first->second += Scale;
1903 // The map already had an entry for this value, which may indicate
1904 // a folding opportunity.
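// A minimal standalone sketch (not part of the original code; the function
// name and the concrete constants A = 2, B = 3 are illustrative) of the
// regrouping described in the comment above CollectAddOperandsWithScales:
// per-value scales plus one accumulated constant give the same result as the
// original nested expression.
static bool collectAddOperandsWithScalesExample() {
  const long A = 2, B = 3;
  const long m = 5, n = 7, o = 11, p = 13, q = 17, r = 19;
  // m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
  long Original =
      m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r);
  // (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0); constant 13 + A*B*29.
  long AccumulatedConstant = 13 + A * B * 29;
  long Regrouped = AccumulatedConstant + (1 + A * B) * m + 1 * n + A * o +
                   A * p + (A * B) * q + 0 * r;
  return Original == Regrouped; // Both evaluate to 379 for these values.
}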
1914 struct APIntCompare {
1915 bool operator()(const APInt &LHS, const APInt &RHS) const {
1916 return LHS.ult(RHS);
1921 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
1922 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
1923 // can't-overflow flags for the operation if possible.
1924 static SCEV::NoWrapFlags
1925 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
1926 const SmallVectorImpl<const SCEV *> &Ops,
1927 SCEV::NoWrapFlags OldFlags) {
1928 using namespace std::placeholders;
1931 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
1933 assert(CanAnalyze && "don't call from other places!");
1935 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1936 SCEV::NoWrapFlags SignOrUnsignWrap =
1937 ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);
1939 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1940 auto IsKnownNonNegative =
1941 std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);
1943 if (SignOrUnsignWrap == SCEV::FlagNSW &&
1944 std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
1945 return ScalarEvolution::setFlags(OldFlags,
1946 (SCEV::NoWrapFlags)SignOrUnsignMask);
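// Illustrative sketch (hypothetical helper, not from the original source) of
// the inference performed above: when every operand is non-negative, the
// signed and unsigned interpretations of an add agree, so "no signed wrap"
// implies "no unsigned wrap" as well.
static bool nswImpliesNuwForNonNegativeOperandsExample() {
  const signed char A = 60, B = 50;          // both known non-negative
  const int Wide = int(A) + int(B);          // 110 fits in i8: no signed wrap
  const bool NoSignedWrap = Wide >= -128 && Wide <= 127;
  const bool NoUnsignedWrap = unsigned(A) + unsigned(B) <= 255u;
  return NoSignedWrap && NoUnsignedWrap;
}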
1951 /// getAddExpr - Get a canonical add expression, or something simpler if possible.
1953 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1954 SCEV::NoWrapFlags Flags) {
1955 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1956 "only nuw or nsw allowed");
1957 assert(!Ops.empty() && "Cannot get empty add!");
1958 if (Ops.size() == 1) return Ops[0];
1960 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1961 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1962 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1963 "SCEVAddExpr operand types don't match!");
1966 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
1968 // Sort by complexity, this groups all similar expression types together.
1969 GroupByComplexity(Ops, LI);
1971 // If there are any constants, fold them together.
1973 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1975 assert(Idx < Ops.size());
1976 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1977 // We found two constants, fold them together!
1978 Ops[0] = getConstant(LHSC->getValue()->getValue() +
1979 RHSC->getValue()->getValue());
1980 if (Ops.size() == 2) return Ops[0];
1981 Ops.erase(Ops.begin()+1); // Erase the folded element
1982 LHSC = cast<SCEVConstant>(Ops[0]);
1985 // If we are left with a constant zero being added, strip it off.
1986 if (LHSC->getValue()->isZero()) {
1987 Ops.erase(Ops.begin());
1991 if (Ops.size() == 1) return Ops[0];
1994 // Okay, check to see if the same value occurs in the operand list more than
1995 // once. If so, merge them together into a multiply expression. Since we
1996 // sorted the list, these values are required to be adjacent.
1997 Type *Ty = Ops[0]->getType();
1998 bool FoundMatch = false;
1999 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2000 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2001 // Scan ahead to count how many equal operands there are.
2003 while (i+Count != e && Ops[i+Count] == Ops[i])
2005 // Merge the values into a multiply.
2006 const SCEV *Scale = getConstant(Ty, Count);
2007 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
2008 if (Ops.size() == Count)
2011 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2012 --i; e -= Count - 1;
2016 return getAddExpr(Ops, Flags);
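// For example (illustrative): (a + b + b + b) reaches this re-canonicalizing
// call rewritten as (a + b*3).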
2018 // Check for truncates. If all the operands are truncated from the same
2019 // type, see if factoring out the truncate would permit the result to be
2020 // folded. E.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
2021 // if the contents of the resulting outer trunc fold to something simple.
2022 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
2023 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
2024 Type *DstType = Trunc->getType();
2025 Type *SrcType = Trunc->getOperand()->getType();
2026 SmallVector<const SCEV *, 8> LargeOps;
2028 // Check all the operands to see if they can be represented in the
2029 // source type of the truncate.
2030 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2031 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2032 if (T->getOperand()->getType() != SrcType) {
2036 LargeOps.push_back(T->getOperand());
2037 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2038 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2039 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2040 SmallVector<const SCEV *, 8> LargeMulOps;
2041 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2042 if (const SCEVTruncateExpr *T =
2043 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2044 if (T->getOperand()->getType() != SrcType) {
2048 LargeMulOps.push_back(T->getOperand());
2049 } else if (const SCEVConstant *C =
2050 dyn_cast<SCEVConstant>(M->getOperand(j))) {
2051 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2058 LargeOps.push_back(getMulExpr(LargeMulOps));
2065 // Evaluate the expression in the larger type.
2066 const SCEV *Fold = getAddExpr(LargeOps, Flags);
2067 // If it folds to something simple, use it. Otherwise, don't.
2068 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2069 return getTruncateExpr(Fold, DstType);
2073 // Skip past any other cast SCEVs.
2074 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2077 // If there are add operands, they would be next.
2078 if (Idx < Ops.size()) {
2079 bool DeletedAdd = false;
2080 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2081 // If we have an add, expand the add operands onto the end of the operands list.
2083 Ops.erase(Ops.begin()+Idx);
2084 Ops.append(Add->op_begin(), Add->op_end());
2088 // If we deleted at least one add, we added operands to the end of the list,
2089 // and they are not necessarily sorted. Recurse to resort and resimplify
2090 // any operands we just acquired.
2092 return getAddExpr(Ops);
2095 // Skip over the add expression until we get to a multiply.
2096 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2099 // Check to see if there are any folding opportunities present with
2100 // operands multiplied by constant values.
2101 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2102 uint64_t BitWidth = getTypeSizeInBits(Ty);
2103 DenseMap<const SCEV *, APInt> M;
2104 SmallVector<const SCEV *, 8> NewOps;
2105 APInt AccumulatedConstant(BitWidth, 0);
2106 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2107 Ops.data(), Ops.size(),
2108 APInt(BitWidth, 1), *this)) {
2109 // Some interesting folding opportunity is present, so it's worthwhile to
2110 // re-generate the operands list. Group the operands by constant scale,
2111 // to avoid multiplying by the same constant scale multiple times.
2112 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2113 for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
2114 E = NewOps.end(); I != E; ++I)
2115 MulOpLists[M.find(*I)->second].push_back(*I);
2116 // Re-generate the operands list.
2118 if (AccumulatedConstant != 0)
2119 Ops.push_back(getConstant(AccumulatedConstant));
2120 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
2121 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
2123 Ops.push_back(getMulExpr(getConstant(I->first),
2124 getAddExpr(I->second)));
2125 if (Ops.empty())
2126 return getConstant(Ty, 0);
2127 if (Ops.size() == 1)
2128 return Ops[0];
2129 return getAddExpr(Ops);
2133 // If we are adding something to a multiply expression, make sure the
2134 // something is not already an operand of the multiply. If so, merge it into the multiply.
2136 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2137 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2138 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2139 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2140 if (isa<SCEVConstant>(MulOpSCEV))
2142 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2143 if (MulOpSCEV == Ops[AddOp]) {
2144 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2145 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2146 if (Mul->getNumOperands() != 2) {
2147 // If the multiply has more than two operands, we must get the other multiply operands.
2149 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2150 Mul->op_begin()+MulOp);
2151 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2152 InnerMul = getMulExpr(MulOps);
2154 const SCEV *One = getConstant(Ty, 1);
2155 const SCEV *AddOne = getAddExpr(One, InnerMul);
2156 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
2157 if (Ops.size() == 2) return OuterMul;
2159 Ops.erase(Ops.begin()+AddOp);
2160 Ops.erase(Ops.begin()+Idx-1);
2162 Ops.erase(Ops.begin()+Idx);
2163 Ops.erase(Ops.begin()+AddOp-1);
2165 Ops.push_back(OuterMul);
2166 return getAddExpr(Ops);
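// Worked example for the fold above (illustrative): with W=7, X=3, Y=4,
// Z=5, both W + X + (X*Y*Z) and W + (X*((Y*Z)+1)) evaluate to 70.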
2169 // Check this multiply against other multiplies being added together.
2170 for (unsigned OtherMulIdx = Idx+1;
2171 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2173 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2174 // If MulOp occurs in OtherMul, we can fold the two multiplies together.
2176 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2177 OMulOp != e; ++OMulOp)
2178 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2179 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2180 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2181 if (Mul->getNumOperands() != 2) {
2182 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2183 Mul->op_begin()+MulOp);
2184 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2185 InnerMul1 = getMulExpr(MulOps);
2187 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2188 if (OtherMul->getNumOperands() != 2) {
2189 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2190 OtherMul->op_begin()+OMulOp);
2191 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2192 InnerMul2 = getMulExpr(MulOps);
2194 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
2195 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
2196 if (Ops.size() == 2) return OuterMul;
2197 Ops.erase(Ops.begin()+Idx);
2198 Ops.erase(Ops.begin()+OtherMulIdx-1);
2199 Ops.push_back(OuterMul);
2200 return getAddExpr(Ops);
2206 // If there are any add recurrences in the operands list, see if any other
2207 // added values are loop invariant. If so, we can fold them into the recurrence.
2209 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2212 // Scan over all recurrences, trying to fold loop invariants into them.
2213 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2214 // Scan all of the other operands to this add and add them to the vector if
2215 // they are loop invariant w.r.t. the recurrence.
2216 SmallVector<const SCEV *, 8> LIOps;
2217 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2218 const Loop *AddRecLoop = AddRec->getLoop();
2219 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2220 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2221 LIOps.push_back(Ops[i]);
2222 Ops.erase(Ops.begin()+i);
2226 // If we found some loop invariants, fold them into the recurrence.
2227 if (!LIOps.empty()) {
2228 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2229 LIOps.push_back(AddRec->getStart());
2231 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2233 AddRecOps[0] = getAddExpr(LIOps);
2235 // Build the new addrec. Propagate the NUW and NSW flags if both the
2236 // outer add and the inner addrec are guaranteed to have no overflow.
2237 // Always propagate NW.
2238 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2239 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2241 // If all of the other operands were loop invariant, we are done.
2242 if (Ops.size() == 1) return NewRec;
2244 // Otherwise, add the folded AddRec by the non-invariant parts.
2245 for (unsigned i = 0;; ++i)
2246 if (Ops[i] == AddRec) {
2250 return getAddExpr(Ops);
2253 // Okay, if there weren't any loop invariants to be folded, check to see if
2254 // there are multiple AddRec's with the same loop induction variable being
2255 // added together. If so, we can fold them.
2256 for (unsigned OtherIdx = Idx+1;
2257 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2259 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2260 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2261 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2263 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2265 if (const SCEVAddRecExpr *OtherAddRec =
2266 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2267 if (OtherAddRec->getLoop() == AddRecLoop) {
2268 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2270 if (i >= AddRecOps.size()) {
2271 AddRecOps.append(OtherAddRec->op_begin()+i,
2272 OtherAddRec->op_end());
2275 AddRecOps[i] = getAddExpr(AddRecOps[i],
2276 OtherAddRec->getOperand(i));
2278 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2280 // Step size has changed, so we cannot guarantee no self-wraparound.
2281 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2282 return getAddExpr(Ops);
2285 // Otherwise we couldn't fold anything into this recurrence. Move on to the next one.
2289 // Okay, it looks like we really DO need an add expr. Check to see if we
2290 // already have one, otherwise create a new one.
2291 FoldingSetNodeID ID;
2292 ID.AddInteger(scAddExpr);
2293 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2294 ID.AddPointer(Ops[i]);
2297 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2299 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2300 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2301 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
2303 UniqueSCEVs.InsertNode(S, IP);
2305 S->setNoWrapFlags(Flags);
2309 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2310 uint64_t k = i * j;
2311 if (j > 1 && k / j != i) Overflow = true;
2312 return k;
2313 }
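// Standalone illustration (not part of the original code) of the overflow
// test above: when i * j wraps past 64 bits, dividing the wrapped product by
// j no longer recovers i, so Overflow is set.
static bool umulOvDetectsOverflowExample() {
  bool Overflow = false;
  (void)umul_ov(1ULL << 33, 1ULL << 33, Overflow); // 2^66 wraps in 64 bits
  return Overflow;
}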
2315 /// Compute the result of "n choose k", the binomial coefficient. If an
2316 /// intermediate computation overflows, Overflow will be set and the return will
2317 /// be garbage. Overflow is not cleared on absence of overflow.
2318 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2319 // We use the multiplicative formula:
2320 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2321 // At each iteration, we take the i-th term of the numerator and divide by the
2322 // i-th term of the denominator. This division will always produce an
2323 // integral result, and helps reduce the chance of overflow in the
2324 // intermediate computations. However, we can still overflow even when the
2325 // final result would fit.
2327 if (n == 0 || n == k) return 1;
2328 if (k > n) return 0;
2333 uint64_t r = 1;
2334 for (uint64_t i = 1; i <= k; ++i) {
2335 r = umul_ov(r, n-(i-1), Overflow);
2336 r /= i;
2337 }
2338 return r;
2339 }
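// Standalone illustration (not part of the original code): Choose computes
// binomial coefficients, e.g. C(5, 2) == 10 and C(6, 3) == 20, and leaves
// Overflow untouched when no intermediate product wraps.
static bool chooseExample() {
  bool Overflow = false;
  return Choose(5, 2, Overflow) == 10 && Choose(6, 3, Overflow) == 20 &&
         !Overflow;
}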
2341 /// Determine if any of the operands in this SCEV are a constant or if
2342 /// any of the add or multiply expressions in this SCEV contain a constant.
2343 static bool containsConstantSomewhere(const SCEV *StartExpr) {
2344 SmallVector<const SCEV *, 4> Ops;
2345 Ops.push_back(StartExpr);
2346 while (!Ops.empty()) {
2347 const SCEV *CurrentExpr = Ops.pop_back_val();
2348 if (isa<SCEVConstant>(*CurrentExpr))
2351 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
2352 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
2353 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
2359 /// getMulExpr - Get a canonical multiply expression, or something simpler if possible.
2361 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2362 SCEV::NoWrapFlags Flags) {
2363 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2364 "only nuw or nsw allowed");
2365 assert(!Ops.empty() && "Cannot get empty mul!");
2366 if (Ops.size() == 1) return Ops[0];
2368 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2369 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2370 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2371 "SCEVMulExpr operand types don't match!");
2374 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2376 // Sort by complexity, this groups all similar expression types together.
2377 GroupByComplexity(Ops, LI);
2379 // If there are any constants, fold them together.
2381 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2383 // C1*(C2+V) -> C1*C2 + C1*V
2384 if (Ops.size() == 2)
2385 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2386 // If any of Add's ops are Adds or Muls with a constant,
2387 // apply this transformation as well.
2388 if (Add->getNumOperands() == 2)
2389 if (containsConstantSomewhere(Add))
2390 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
2391 getMulExpr(LHSC, Add->getOperand(1)));
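// For instance (illustrative): 2 * (3 + x) is rewritten here as 6 + 2*x once
// the constant products fold.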
2394 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2395 // We found two constants, fold them together!
2396 ConstantInt *Fold = ConstantInt::get(getContext(),
2397 LHSC->getValue()->getValue() *
2398 RHSC->getValue()->getValue());
2399 Ops[0] = getConstant(Fold);
2400 Ops.erase(Ops.begin()+1); // Erase the folded element
2401 if (Ops.size() == 1) return Ops[0];
2402 LHSC = cast<SCEVConstant>(Ops[0]);
2405 // If we are left with a constant one being multiplied, strip it off.
2406 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
2407 Ops.erase(Ops.begin());
2408 --Idx;
2409 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2410 // If we have a multiply of zero, it will always be zero.
2411 return Ops[0];
2412 } else if (Ops[0]->isAllOnesValue()) {
2413 // If we have a mul by -1 of an add, try distributing the -1 among the add operands.
2415 if (Ops.size() == 2) {
2416 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2417 SmallVector<const SCEV *, 4> NewOps;
2418 bool AnyFolded = false;
2419 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
2420 E = Add->op_end(); I != E; ++I) {
2421 const SCEV *Mul = getMulExpr(Ops[0], *I);
2422 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2423 NewOps.push_back(Mul);
2426 return getAddExpr(NewOps);
2428 else if (const SCEVAddRecExpr *
2429 AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2430 // Negation preserves a recurrence's no self-wrap property.
2431 SmallVector<const SCEV *, 4> Operands;
2432 for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
2433 E = AddRec->op_end(); I != E; ++I) {
2434 Operands.push_back(getMulExpr(Ops[0], *I));
2436 return getAddRecExpr(Operands, AddRec->getLoop(),
2437 AddRec->getNoWrapFlags(SCEV::FlagNW));
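// For example (illustrative): -1 * {0,+,1}<L> becomes {0,+,-1}<L>, and the
// recurrence keeps its no-self-wrap flag if it had one.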
2442 if (Ops.size() == 1)
2446 // Skip over the add expression until we get to a multiply.
2447 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2450 // If there are mul operands inline them all into this expression.
2451 if (Idx < Ops.size()) {
2452 bool DeletedMul = false;
2453 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2454 // If we have a mul, expand the mul operands onto the end of the operands list.
2456 Ops.erase(Ops.begin()+Idx);
2457 Ops.append(Mul->op_begin(), Mul->op_end());
2461 // If we deleted at least one mul, we added operands to the end of the list,
2462 // and they are not necessarily sorted. Recurse to resort and resimplify
2463 // any operands we just acquired.
2465 return getMulExpr(Ops);
2468 // If there are any add recurrences in the operands list, see if any other
2469 // multiplied values are loop invariant. If so, we can fold them into the recurrence.
2471 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2474 // Scan over all recurrences, trying to fold loop invariants into them.
2475 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2476 // Scan all of the other operands to this mul and add them to the vector if
2477 // they are loop invariant w.r.t. the recurrence.
2478 SmallVector<const SCEV *, 8> LIOps;
2479 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2480 const Loop *AddRecLoop = AddRec->getLoop();
2481 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2482 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2483 LIOps.push_back(Ops[i]);
2484 Ops.erase(Ops.begin()+i);
2488 // If we found some loop invariants, fold them into the recurrence.
2489 if (!LIOps.empty()) {
2490 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2491 SmallVector<const SCEV *, 4> NewOps;
2492 NewOps.reserve(AddRec->getNumOperands());
2493 const SCEV *Scale = getMulExpr(LIOps);
2494 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2495 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2497 // Build the new addrec. Propagate the NUW and NSW flags if both the
2498 // outer mul and the inner addrec are guaranteed to have no overflow.
2500 // The no-self-wrap flag cannot be guaranteed after changing the step size, but
2501 // it will be inferred if either NUW or NSW is true.
2502 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2503 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2505 // If all of the other operands were loop invariant, we are done.
2506 if (Ops.size() == 1) return NewRec;
2508 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2509 for (unsigned i = 0;; ++i)
2510 if (Ops[i] == AddRec) {
2514 return getMulExpr(Ops);
2517 // Okay, if there weren't any loop invariants to be folded, check to see if
2518 // there are multiple AddRec's with the same loop induction variable being
2519 // multiplied together. If so, we can fold them.
2521 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2522 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2523 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2524 // ]]],+,...up to x=2n}.
2525 // Note that the arguments to choose() are always integers with values
2526 // known at compile time, never SCEV objects.
2528 // The implementation avoids pointless extra computations when the two
2529 // addrec's are of different length (mathematically, it's equivalent to
2530 // an infinite stream of zeros on the right).
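// Worked degree-1 example (illustrative, not from the original source):
// {A1,+,A2}<L> * {B1,+,B2}<L> = {A1*B1,+,A1*B2+A2*B1+A2*B2,+,2*A2*B2}<L>.
// With A1=3, A2=5, B1=7, B2=11, iteration i=4 gives 23 * 51 = 1173 on both
// sides.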
2531 bool OpsModified = false;
2532 for (unsigned OtherIdx = Idx+1;
2533 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2535 const SCEVAddRecExpr *OtherAddRec =
2536 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2537 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2540 bool Overflow = false;
2541 Type *Ty = AddRec->getType();
2542 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2543 SmallVector<const SCEV*, 7> AddRecOps;
2544 for (int x = 0, xe = AddRec->getNumOperands() +
2545 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2546 const SCEV *Term = getConstant(Ty, 0);
2547 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2548 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2549 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2550 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2551 z < ze && !Overflow; ++z) {
2552 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2554 if (LargerThan64Bits)
2555 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2557 Coeff = Coeff1*Coeff2;
2558 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2559 const SCEV *Term1 = AddRec->getOperand(y-z);
2560 const SCEV *Term2 = OtherAddRec->getOperand(z);
2561 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2564 AddRecOps.push_back(Term);
2567 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2569 if (Ops.size() == 2) return NewAddRec;
2570 Ops[Idx] = NewAddRec;
2571 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2573 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2579 return getMulExpr(Ops);
2581 // Otherwise we couldn't fold anything into this recurrence. Move on to the next one.
2585 // Okay, it looks like we really DO need an mul expr. Check to see if we
2586 // already have one, otherwise create a new one.
2587 FoldingSetNodeID ID;
2588 ID.AddInteger(scMulExpr);
2589 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2590 ID.AddPointer(Ops[i]);
2593 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2595 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2596 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2597 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2599 UniqueSCEVs.InsertNode(S, IP);
2601 S->setNoWrapFlags(Flags);
2605 /// getUDivExpr - Get a canonical unsigned division expression, or something
2606 /// simpler if possible.
2607 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2609 assert(getEffectiveSCEVType(LHS->getType()) ==
2610 getEffectiveSCEVType(RHS->getType()) &&
2611 "SCEVUDivExpr operand types don't match!");
2613 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2614 if (RHSC->getValue()->equalsInt(1))
2615 return LHS; // X udiv 1 --> x
2616 // If the denominator is zero, the result of the udiv is undefined. Don't
2617 // try to analyze it, because the resolution chosen here may differ from
2618 // the resolution chosen in other parts of the compiler.
2619 if (!RHSC->getValue()->isZero()) {
2620 // Determine if the division can be folded into the operands of the add recurrence.
2622 // TODO: Generalize this to non-constants by using known-bits information.
2623 Type *Ty = LHS->getType();
2624 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
2625 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2626 // For non-power-of-two values, effectively round the value up to the
2627 // nearest power of two.
2628 if (!RHSC->getValue()->getValue().isPowerOf2())
2630 IntegerType *ExtTy =
2631 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2632 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2633 if (const SCEVConstant *Step =
2634 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2635 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2636 const APInt &StepInt = Step->getValue()->getValue();
2637 const APInt &DivInt = RHSC->getValue()->getValue();
2638 if (!StepInt.urem(DivInt) &&
2639 getZeroExtendExpr(AR, ExtTy) ==
2640 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2641 getZeroExtendExpr(Step, ExtTy),
2642 AR->getLoop(), SCEV::FlagAnyWrap)) {
2643 SmallVector<const SCEV *, 4> Operands;
2644 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2645 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
2646 return getAddRecExpr(Operands, AR->getLoop(),
2649 /// Get a canonical UDivExpr for a recurrence.
2650 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2651 // We can currently only fold X%N if X is constant.
2652 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2653 if (StartC && !DivInt.urem(StepInt) &&
2654 getZeroExtendExpr(AR, ExtTy) ==
2655 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2656 getZeroExtendExpr(Step, ExtTy),
2657 AR->getLoop(), SCEV::FlagAnyWrap)) {
2658 const APInt &StartInt = StartC->getValue()->getValue();
2659 const APInt &StartRem = StartInt.urem(StepInt);
2661 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2662 AR->getLoop(), SCEV::FlagNW);
2665 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
2666 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2667 SmallVector<const SCEV *, 4> Operands;
2668 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
2669 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
2670 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2671 // Find an operand that's safely divisible.
2672 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2673 const SCEV *Op = M->getOperand(i);
2674 const SCEV *Div = getUDivExpr(Op, RHSC);
2675 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2676 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2679 return getMulExpr(Operands);
2683 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2684 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
2685 SmallVector<const SCEV *, 4> Operands;
2686 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2687 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2688 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2690 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2691 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2692 if (isa<SCEVUDivExpr>(Op) ||
2693 getMulExpr(Op, RHS) != A->getOperand(i))
2695 Operands.push_back(Op);
2697 if (Operands.size() == A->getNumOperands())
2698 return getAddExpr(Operands);
2702 // Fold if both operands are constant.
2703 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2704 Constant *LHSCV = LHSC->getValue();
2705 Constant *RHSCV = RHSC->getValue();
2706 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2712 FoldingSetNodeID ID;
2713 ID.AddInteger(scUDivExpr);
2717 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2718 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2720 UniqueSCEVs.InsertNode(S, IP);
2724 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
2725 APInt A = C1->getValue()->getValue().abs();
2726 APInt B = C2->getValue()->getValue().abs();
2727 uint32_t ABW = A.getBitWidth();
2728 uint32_t BBW = B.getBitWidth();
2735 return APIntOps::GreatestCommonDivisor(A, B);
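// Standalone illustration (not part of the original code): the factor used
// by getUDivExactExpr below is the gcd of the absolute values, e.g. the gcd
// of -12 and 8 is 4.
static bool gcdExample() {
  APInt A = APInt(32, -12, /*isSigned=*/true).abs();
  APInt B(32, 8);
  return APIntOps::GreatestCommonDivisor(A, B) == 4;
}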
2738 /// getUDivExactExpr - Get a canonical unsigned division expression, or
2739 /// something simpler if possible. There is no representation for an exact udiv
2740 /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
2741 /// We can't do this when it's not exact because the udiv may be clearing bits.
2742 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
2744 // TODO: we could try to find factors in all sorts of things, but for now we
2745 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
2746 // end of this file for inspiration.
2748 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
2750 return getUDivExpr(LHS, RHS);
2752 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
2753 // If the mulexpr multiplies by a constant, then that constant must be the
2754 // first element of the mulexpr.
2755 if (const SCEVConstant *LHSCst =
2756 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2757 if (LHSCst == RHSCst) {
2758 SmallVector<const SCEV *, 2> Operands;
2759 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2760 return getMulExpr(Operands);
2763 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2764 // that there's a factor provided by one of the other terms. We need to check.
2766 APInt Factor = gcd(LHSCst, RHSCst);
2767 if (!Factor.isIntN(1)) {
2768 LHSCst = cast<SCEVConstant>(
2769 getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
2770 RHSCst = cast<SCEVConstant>(
2771 getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
2772 SmallVector<const SCEV *, 2> Operands;
2773 Operands.push_back(LHSCst);
2774 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2775 LHS = getMulExpr(Operands);
2777 Mul = dyn_cast<SCEVMulExpr>(LHS);
2779 return getUDivExactExpr(LHS, RHS);
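// Illustrative example: for (3 * x) /u 6 the gcd factor is 3, the operands
// reduce to x and 2, and this recursion returns x /u 2.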
2784 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2785 if (Mul->getOperand(i) == RHS) {
2786 SmallVector<const SCEV *, 2> Operands;
2787 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2788 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2789 return getMulExpr(Operands);
2793 return getUDivExpr(LHS, RHS);
2796 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2797 /// Simplify the expression as much as possible.
2798 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2800 SCEV::NoWrapFlags Flags) {
2801 SmallVector<const SCEV *, 4> Operands;
2802 Operands.push_back(Start);
2803 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2804 if (StepChrec->getLoop() == L) {
2805 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2806 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
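// For example (illustrative): a step that is itself {Y,+,Z}<L> turns
// {X,+,{Y,+,Z}<L>}<L> into the flattened recurrence {X,+,Y,+,Z}<L>.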
2809 Operands.push_back(Step);
2810 return getAddRecExpr(Operands, L, Flags);
2813 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2814 /// Simplify the expression as much as possible.
2816 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2817 const Loop *L, SCEV::NoWrapFlags Flags) {
2818 if (Operands.size() == 1) return Operands[0];
2820 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2821 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2822 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2823 "SCEVAddRecExpr operand types don't match!");
2824 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2825 assert(isLoopInvariant(Operands[i], L) &&
2826 "SCEVAddRecExpr operand is not loop-invariant!");
2829 if (Operands.back()->isZero()) {
2830 Operands.pop_back();
2831 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2834 // It's tempting to want to call getMaxBackedgeTakenCount here and
2835 // use that information to infer NUW and NSW flags. However, computing a
2836 // BE count requires calling getAddRecExpr, so we may not yet have a
2837 // meaningful BE count at this point (and if we don't, we'd be stuck
2838 // with a SCEVCouldNotCompute as the cached BE count).
2840 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2842 // Canonicalize nested AddRecs by nesting them in order of loop depth.
2843 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2844 const Loop *NestedLoop = NestedAR->getLoop();
2845 if (L->contains(NestedLoop) ?
2846 (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2847 (!NestedLoop->contains(L) &&
2848 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2849 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2850 NestedAR->op_end());
2851 Operands[0] = NestedAR->getStart();
2852 // AddRecs require their operands be loop-invariant with respect to their
2853 // loops. Don't perform this transformation if it would break this requirement.
2855 bool AllInvariant = true;
2856 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2857 if (!isLoopInvariant(Operands[i], L)) {
2858 AllInvariant = false;
2862 // Create a recurrence for the outer loop with the same step size.
2864 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2865 // inner recurrence has the same property.
2866 SCEV::NoWrapFlags OuterFlags =
2867 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2869 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2870 AllInvariant = true;
2871 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2872 if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2873 AllInvariant = false;
2877 // Ok, both add recurrences are valid after the transformation.
2879 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2880 // the outer recurrence has the same property.
2881 SCEV::NoWrapFlags InnerFlags =
2882 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2883 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2886 // Reset Operands to its original state.
2887 Operands[0] = NestedAR;
2891 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2892 // already have one, otherwise create a new one.
2893 FoldingSetNodeID ID;
2894 ID.AddInteger(scAddRecExpr);
2895 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2896 ID.AddPointer(Operands[i]);
2900 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2902 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2903 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2904 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2905 O, Operands.size(), L);
2906 UniqueSCEVs.InsertNode(S, IP);
2908 S->setNoWrapFlags(Flags);
2912 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2914 SmallVector<const SCEV *, 2> Ops;
2917 return getSMaxExpr(Ops);
2921 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2922 assert(!Ops.empty() && "Cannot get empty smax!");
2923 if (Ops.size() == 1) return Ops[0];
2925 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2926 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2927 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2928 "SCEVSMaxExpr operand types don't match!");
2931 // Sort by complexity, this groups all similar expression types together.
2932 GroupByComplexity(Ops, LI);
2934 // If there are any constants, fold them together.
2936 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2938 assert(Idx < Ops.size());
2939 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2940 // We found two constants, fold them together!
2941 ConstantInt *Fold = ConstantInt::get(getContext(),
2942 APIntOps::smax(LHSC->getValue()->getValue(),
2943 RHSC->getValue()->getValue()));
2944 Ops[0] = getConstant(Fold);
2945 Ops.erase(Ops.begin()+1); // Erase the folded element
2946 if (Ops.size() == 1) return Ops[0];
2947 LHSC = cast<SCEVConstant>(Ops[0]);
2950 // If we are left with a constant minimum-int, strip it off.
2951 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2952 Ops.erase(Ops.begin());
2954 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2955 // If we have an smax with a constant maximum-int, it will always be the maximum.
2960 if (Ops.size() == 1) return Ops[0];
2963 // Find the first SMax
2964 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2967 // Check to see if one of the operands is an SMax. If so, expand its operands
2968 // onto our operand list, and recurse to simplify.
2969 if (Idx < Ops.size()) {
2970 bool DeletedSMax = false;
2971 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2972 Ops.erase(Ops.begin()+Idx);
2973 Ops.append(SMax->op_begin(), SMax->op_end());
2978 return getSMaxExpr(Ops);
2981 // Okay, check to see if the same value occurs in the operand list twice. If
2982 // so, delete one. Since we sorted the list, these values are required to be adjacent.
2984 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2985 // X smax Y smax Y --> X smax Y
2986 // X smax Y --> X, if X is always greater than or equal to Y
2987 if (Ops[i] == Ops[i+1] ||
2988 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2989 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2991 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2992 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2996 if (Ops.size() == 1) return Ops[0];
2998 assert(!Ops.empty() && "Reduced smax down to nothing!");
3000 // Okay, it looks like we really DO need an smax expr. Check to see if we
3001 // already have one, otherwise create a new one.
3002 FoldingSetNodeID ID;
3003 ID.AddInteger(scSMaxExpr);
3004 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3005 ID.AddPointer(Ops[i]);
3007 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3008 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3009 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3010 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
3012 UniqueSCEVs.InsertNode(S, IP);
3016 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
3018 SmallVector<const SCEV *, 2> Ops;
3021 return getUMaxExpr(Ops);
3025 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3026 assert(!Ops.empty() && "Cannot get empty umax!");
3027 if (Ops.size() == 1) return Ops[0];
3029 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3030 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3031 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3032 "SCEVUMaxExpr operand types don't match!");
3035 // Sort by complexity, this groups all similar expression types together.
3036 GroupByComplexity(Ops, LI);
3038 // If there are any constants, fold them together.
3040 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3042 assert(Idx < Ops.size());
3043 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3044 // We found two constants, fold them together!
3045 ConstantInt *Fold = ConstantInt::get(getContext(),
3046 APIntOps::umax(LHSC->getValue()->getValue(),
3047 RHSC->getValue()->getValue()));
3048 Ops[0] = getConstant(Fold);
3049 Ops.erase(Ops.begin()+1); // Erase the folded element
3050 if (Ops.size() == 1) return Ops[0];
3051 LHSC = cast<SCEVConstant>(Ops[0]);
3054 // If we are left with a constant minimum-int, strip it off.
3055 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
3056 Ops.erase(Ops.begin());
3058 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
3059 // If we have an umax with a constant maximum-int, it will always be the maximum.
3064 if (Ops.size() == 1) return Ops[0];
3067 // Find the first UMax
3068 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
3071 // Check to see if one of the operands is a UMax. If so, expand its operands
3072 // onto our operand list, and recurse to simplify.
3073 if (Idx < Ops.size()) {
3074 bool DeletedUMax = false;
3075 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
3076 Ops.erase(Ops.begin()+Idx);
3077 Ops.append(UMax->op_begin(), UMax->op_end());
3082 return getUMaxExpr(Ops);
3085 // Okay, check to see if the same value occurs in the operand list twice. If
3086 // so, delete one. Since we sorted the list, these values are required to be adjacent.
3088 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3089 // X umax Y umax Y --> X umax Y
3090 // X umax Y --> X, if X is always greater than or equal to Y
3091 if (Ops[i] == Ops[i+1] ||
3092 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
3093 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3095 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
3096 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3100 if (Ops.size() == 1) return Ops[0];
3102 assert(!Ops.empty() && "Reduced umax down to nothing!");
3104 // Okay, it looks like we really DO need a umax expr. Check to see if we
3105 // already have one, otherwise create a new one.
3106 FoldingSetNodeID ID;
3107 ID.AddInteger(scUMaxExpr);
3108 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3109 ID.AddPointer(Ops[i]);
3111 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3112 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3113 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3114 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
3116 UniqueSCEVs.InsertNode(S, IP);
3120 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3122 // ~smax(~x, ~y) == smin(x, y).
3123 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
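// Standalone check (illustrative, not part of the original code) of the
// identity used by getSMinExpr/getUMinExpr: in two's complement,
// ~max(~x, ~y) == min(x, y).
static bool minViaMaxIdentityExample() {
  const int X = 17, Y = -4;
  const int NotMax = ~((~X > ~Y) ? ~X : ~Y);
  const int Min = (X < Y) ? X : Y;
  return NotMax == Min; // both are -4
}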
3126 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3128 // ~umax(~x, ~y) == umin(x, y)
3129 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3132 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3133 // If we have DataLayout, we can bypass creating a target-independent
3134 // constant expression and then folding it back into a ConstantInt.
3135 // This is just a compile-time optimization.
3136 if (DL)
3137 return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
3139 Constant *C = ConstantExpr::getSizeOf(AllocTy);
3140 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
3141 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
3143 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
3144 assert(Ty == IntTy && "Effective SCEV type doesn't match");
3145 return getTruncateOrZeroExtend(getSCEV(C), Ty);
3148 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3151 // If we have DataLayout, we can bypass creating a target-independent
3152 // constant expression and then folding it back into a ConstantInt.
3153 // This is just a compile-time optimization.
3154 if (DL)
3155 return getConstant(IntTy,
3156 DL->getStructLayout(STy)->getElementOffset(FieldNo));
3159 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
3160 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
3161 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
3164 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
3165 return getTruncateOrZeroExtend(getSCEV(C), Ty);
3168 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3169 // Don't attempt to do anything other than create a SCEVUnknown object
3170 // here. createSCEV only calls getUnknown after checking for all other
3171 // interesting possibilities, and any other code that calls getUnknown
3172 // is doing so in order to hide a value from SCEV canonicalization.
3174 FoldingSetNodeID ID;
3175 ID.AddInteger(scUnknown);
3178 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3179 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3180 "Stale SCEVUnknown in uniquing map!");
3183 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3185 FirstUnknown = cast<SCEVUnknown>(S);
3186 UniqueSCEVs.InsertNode(S, IP);
3190 //===----------------------------------------------------------------------===//
3191 // Basic SCEV Analysis and PHI Idiom Recognition Code
3194 /// isSCEVable - Test if values of the given type are analyzable within
3195 /// the SCEV framework. This primarily includes integer types, and it
3196 /// can optionally include pointer types if the ScalarEvolution class
3197 /// has access to target-specific information.
3198 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3199 // Integers and pointers are always SCEVable.
3200 return Ty->isIntegerTy() || Ty->isPointerTy();
3203 /// getTypeSizeInBits - Return the size in bits of the specified type,
3204 /// for which isSCEVable must return true.
3205 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3206 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3208 // If we have a DataLayout, use it!
3209 if (DL)
3210 return DL->getTypeSizeInBits(Ty);
3212 // Integer types have fixed sizes.
3213 if (Ty->isIntegerTy())
3214 return Ty->getPrimitiveSizeInBits();
3216 // The only other supported type is pointer. Without DataLayout, conservatively
3217 // assume pointers are 64-bit.
3218 assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
3219 return 64;
3222 /// getEffectiveSCEVType - Return a type with the same bitwidth as
3223 /// the given type and which represents how SCEV will treat the given
3224 /// type, for which isSCEVable must return true. For pointer types,
3225 /// this is the pointer-sized integer type.
3226 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3227 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3229 if (Ty->isIntegerTy()) {
3230 return Ty;
3231 }
3233 // The only other supported type is pointer.
3234 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3236 if (DL)
3237 return DL->getIntPtrType(Ty);
3239 // Without DataLayout, conservatively assume pointers are 64-bit.
3240 return Type::getInt64Ty(getContext());
3243 const SCEV *ScalarEvolution::getCouldNotCompute() {
3244 return &CouldNotCompute;
3248 // Helper class working with SCEVTraversal to figure out if a SCEV contains
3249 // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
3250 // is set iff we find such a SCEVUnknown.
3252 struct FindInvalidSCEVUnknown {
3254 FindInvalidSCEVUnknown() { FindOne = false; }
3255 bool follow(const SCEV *S) {
3256 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
3260 if (!cast<SCEVUnknown>(S)->getValue())
3267 bool isDone() const { return FindOne; }
3271 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3272 FindInvalidSCEVUnknown F;
3273 SCEVTraversal<FindInvalidSCEVUnknown> ST(F);

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;

    ValueExprMap.erase(I);
  }
  const SCEV *S = createSCEV(V);

  // The process of creating a SCEV for V may have caused other SCEVs
  // to have been created, so it's necessary to insert the new entry
  // from scratch, rather than trying to remember the insert position
  // above.
  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}
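
// Illustrative sketch: getSCEV is the main client entry point. Results are
// memoized in ValueExprMap, so repeated queries for the same Value return the
// same uniqued SCEV pointer. Assuming ScalarEvolution &SE and an
// integer-typed Value *V:
//
//   const SCEV *S1 = SE.getSCEV(V); // analyzes V and caches the result
//   const SCEV *S2 = SE.getSCEV(V); // served from the cache
//   assert(S1 == S2);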

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
                getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags) {
  assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");

  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getConstant(LHS->getType(), 0);

  // X - Y --> X + -Y.
  // X -(nsw || nuw) Y --> X + -Y.
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}
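
// Illustrative sketch: subtraction is modeled as addition of the negated
// operand, so LHS - RHS becomes LHS + (-1 * RHS), and X - X folds to the zero
// constant via the fast path above. Assuming ScalarEvolution &SE and SCEVs
// *X and *Y of the same integer type:
//
//   const SCEV *Diff = SE.getMinusSCEV(X, Y); // (X + (-1 * Y))
//   const SCEV *Zero = SE.getMinusSCEV(X, X); // constant 0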

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is sign
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is zero
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is sign
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}
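
// Illustrative sketch: when the operand types differ, the narrower operand is
// zero-extended to the wider type before the umax is built. Assuming
// ScalarEvolution &SE, an i32 SCEV *A, and an i64 SCEV *B:
//
//   const SCEV *M = SE.getUMaxFromMismatchedTypes(A, B);
//   // equivalent to SE.getUMaxExpr(SE.getZeroExtendExpr(A, B->getType()), B)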

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

/// getPointerBase - Transitively follow the chain of pointer-type operands
/// until reaching a SCEV that does not have a single pointer operand. This
/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
/// but corner cases do exist.
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      if ((*I)->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer operands.
        if (PtrOp)
          return V;
        PtrOp = *I;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}
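
// Illustrative sketch: for a pointer expression such as
// ((4 * (sext i32 %i to i64)) + %base), getPointerBase walks through the add
// and returns the SCEVUnknown for %base; an expression with more than one
// pointer operand is returned unchanged. Assuming ScalarEvolution &SE and a
// pointer-typed SCEV *PtrExpr:
//
//   const SCEV *Base = SE.getPointerBase(PtrExpr);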

/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMapType map if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        forgetMemoizedResults(Old);
        ValueExprMap.erase(It);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const Loop *L = LI->getLoopFor(PN->getParent()))
    if (L->getHeader() == PN->getParent()) {
      // The loop may have multiple entrances or multiple exits; we can analyze
      // this phi as an addrec if it has a unique entry value and a unique
      // backedge value.
      Value *BEValueV = nullptr, *StartValueV = nullptr;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *V = PN->getIncomingValue(i);
        if (L->contains(PN->getIncomingBlock(i))) {
          if (!BEValueV) {
            BEValueV = V;
          } else if (BEValueV != V) {
            BEValueV = nullptr;
            break;
          }
        } else if (!StartValueV) {
          StartValueV = V;
        } else if (StartValueV != V) {
          StartValueV = nullptr;
          break;
        }
      }
      if (BEValueV && StartValueV) {
        // While we are analyzing this PHI node, handle its value symbolically.
        const SCEV *SymbolicName = getUnknown(PN);
        assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
               "PHI node already processed?");
        ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming around
        // the back-edge.
        const SCEV *BEValue = getSCEV(BEValueV);

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the symbolic
        // value we just inserted, then we found a simple induction variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace it
          // with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV *, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV *Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (isLoopInvariant(Accum, L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

              // If the increment doesn't overflow, then neither the addrec nor
              // the post-increment will overflow.
              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
                if (OBO->hasNoUnsignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNUW);
                if (OBO->hasNoSignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNSW);
              } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
                // If the increment is an inbounds GEP, then we know the address
                // space cannot be wrapped around. We cannot make any guarantee
                // about signed or unsigned overflow because pointers are
                // unsigned but we may have a negative index from the base
                // pointer. We can guarantee that no unsigned wrap occurs if the
                // indices form a positive value.
                if (GEP->isInBounds()) {
                  Flags = setFlags(Flags, SCEV::FlagNW);

                  const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
                  if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
                    Flags = setFlags(Flags, SCEV::FlagNUW);
                }

                // We cannot transfer nuw and nsw flags from subtraction
                // operations -- sub nuw X, Y is not the same as add nuw X, -Y
                // for instance.
              }

              const SCEV *StartVal = getSCEV(StartValueV);
              const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

              // Since the no-wrap flags are on the increment, they apply to the
              // post-incremented value as well.
              if (isLoopInvariant(Accum, L))
                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
                                    Accum, L, Flags);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic. We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1} and BEValue is j.
          // Because the other in-value of i (0) fits the evolution of BEValue
          // i really is an addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV *StartVal = getSCEV(StartValueV);

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial step of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              // FIXME: For constant StartVal, we should be able to infer
              // no-wrap flags.
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L,
                              SCEV::FlagAnyWrap);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic. We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        }
      }
    }

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, DL, TLI, DT, AC))
    if (LI->replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}
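
// Illustrative sketch of the add-recurrence case handled above, for a loop
// header PHI written in LLVM IR (labels and names are assumed for the
// example):
//
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i64 %iv, 1
//
// The backedge value is an add of the PHI's symbolic name and 1, so the PHI
// is recognized as the affine recurrence {0,+,1}<nsw><%loop>, with the nsw
// flag transferred from the increment as described above.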

/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!Base->getType()->getPointerElementType()->isSized())
    return getUnknown(GEP);

  // Don't blindly transfer the inbounds flag from the GEP instruction to the
  // Add expression, because the Instruction may be guarded by control flow
  // and the no-overflow bits may not be valid for the expression in any
  // context.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
      const SCEV *IndexS = getSCEV(Index);
      // Getelementptr indices are signed.
      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Get the SCEV for the GEP base.
  const SCEV *BaseS = getSCEV(Base);

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseS, TotalOffset, Wrap);
}
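
// Illustrative sketch of the decomposition above (assuming a DataLayout with
// 64-bit pointers and a 4-byte i32):
//
//   %p = getelementptr inbounds i32* %base, i64 %i
//
// expands to the SCEV ((4 * %i) + %base): each index is sign-extended or
// truncated to the pointer-sized integer type, scaled by its element size,
// and added to the SCEV of the base pointer; the inbounds flag is used to
// mark the offset arithmetic built here as non-wrapping (nsw).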

/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration). It is, at the same time,
/// the minimum number of times S is divisible by 2. For example, given {4,+,8}
/// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}
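
// Worked example for the doc comment above: for the recurrence {4,+,8}, the
// start 4 = 0b100 has two trailing zeros and the step 8 = 0b1000 has three,
// so every value 4, 12, 20, ... has at least min(2, 3) = 2 trailing zeros,
// which is exactly what the addrec case above computes.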

/// GetRangeFromMetadata - Helper method to assign a range to V from
/// metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) {
      ConstantRange TotalRange(
          cast<IntegerType>(I->getType())->getBitWidth(), false);

      unsigned NumRanges = MD->getNumOperands() / 2;
      assert(NumRanges >= 1);

      for (unsigned i = 0; i < NumRanges; ++i) {
        ConstantInt *Lower =
            mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 0));
        ConstantInt *Upper =
            mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 1));
        ConstantRange Range(Lower->getValue(), Upper->getValue());
        TotalRange = TotalRange.unionWith(Range);
      }

      return TotalRange;
    }
  }

  return None;
}
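
// Illustrative sketch: for a load annotated with !range metadata
// (illustrative IR, names assumed):
//
//   %x = load i8, i8* %p, !range !0
//   !0 = !{i8 0, i8 42, i8 100, i8 120}
//
// the [Lower, Upper) pairs describe the sets [0,42) and [100,120), and the
// helper folds them into a single conservative ConstantRange covering both.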

/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {
  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
  if (I != UnsignedRanges.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum unsigned value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getMinValue(BitWidth),
                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getUnsignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getUnsignedRange(Add->getOperand(i)));
    return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
    return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));