1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the scalar evolution analysis
11 // engine, which is used primarily to analyze expressions involving induction
12 // variables in loops.
14 // There are several aspects to this library. First is the representation of
15 // scalar expressions, which are represented as subclasses of the SCEV class.
16 // These classes are used to represent certain types of subexpressions that we
17 // can handle. We only create one SCEV of a particular shape, so
18 // pointer-comparisons for equality are legal.
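// For example, the SCEV built for (%a + %b) and the SCEV built for (%b + %a)
// end up as the same uniqued object, so clients can test two expressions for
// structural equality with a simple pointer comparison.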
20 // One important aspect of the SCEV objects is that they are never cyclic, even
21 // if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
23 // recurrence) then we represent it directly as a recurrence node, otherwise we
24 // represent it as a SCEVUnknown node.
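// For example, a loop counter that starts at 0 and is incremented by 2 on each
// iteration is represented by the recurrence {0,+,2}, while a PHI node merging
// two unrelated values is wrapped in a SCEVUnknown.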
26 // In addition to being able to represent expressions of various types, we also
27 // have folders that are used to build the *canonical* representation for a
28 // particular expression. These folders are capable of using a variety of
29 // rewrite rules to simplify the expressions.
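// For example, the folders collect repeated operands, so an expression such as
// (%x + 1 + %x) is built in the canonical form (1 + (2 * %x)).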
31 // Once the folders are defined, we can implement the more interesting
32 // higher-level code, such as the code that recognizes PHI nodes of various
33 // types, computes the execution count of a loop, etc.
35 // TODO: We should use these routines and value representations to implement
36 // dependence analysis!
38 //===----------------------------------------------------------------------===//
40 // There are several good references for the techniques used in this analysis.
42 // Chains of recurrences -- a method to expedite the evaluation
43 // of closed-form functions
44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima
46 // On computational properties of chains of recurrences
49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50 // Robert A. van Engelen
52 // Efficient Symbolic Analysis for Optimizing Compilers
53 // Robert A. van Engelen
55 // Using the chains of recurrences algebra for data dependence testing and
56 // induction variable substitution
57 // MS Thesis, Johnie Birch
59 //===----------------------------------------------------------------------===//
61 #include "llvm/Analysis/ScalarEvolution.h"
62 #include "llvm/ADT/Optional.h"
63 #include "llvm/ADT/STLExtras.h"
64 #include "llvm/ADT/SmallPtrSet.h"
65 #include "llvm/ADT/Statistic.h"
66 #include "llvm/Analysis/AssumptionCache.h"
67 #include "llvm/Analysis/ConstantFolding.h"
68 #include "llvm/Analysis/InstructionSimplify.h"
69 #include "llvm/Analysis/LoopInfo.h"
70 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
71 #include "llvm/Analysis/TargetLibraryInfo.h"
72 #include "llvm/Analysis/ValueTracking.h"
73 #include "llvm/IR/ConstantRange.h"
74 #include "llvm/IR/Constants.h"
75 #include "llvm/IR/DataLayout.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/GetElementPtrTypeIterator.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalVariable.h"
81 #include "llvm/IR/InstIterator.h"
82 #include "llvm/IR/Instructions.h"
83 #include "llvm/IR/LLVMContext.h"
84 #include "llvm/IR/Metadata.h"
85 #include "llvm/IR/Operator.h"
86 #include "llvm/IR/PatternMatch.h"
87 #include "llvm/Support/CommandLine.h"
88 #include "llvm/Support/Debug.h"
89 #include "llvm/Support/ErrorHandling.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Support/SaveAndRestore.h"
96 #define DEBUG_TYPE "scalar-evolution"
98 STATISTIC(NumArrayLenItCounts,
99 "Number of trip counts computed with array length");
100 STATISTIC(NumTripCountsComputed,
101 "Number of loops with predictable loop counts");
102 STATISTIC(NumTripCountsNotComputed,
103 "Number of loops without predictable loop counts");
104 STATISTIC(NumBruteForceTripCountsComputed,
105 "Number of loops with trip counts computed by force");
107 static cl::opt<unsigned>
108 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
109 cl::desc("Maximum number of iterations SCEV will "
110 "symbolically execute a constant derived loop"), cl::init(100));
114 // FIXME: Enable this with XDEBUG when the test suite is clean.
116 static cl::opt<bool> VerifySCEV("verify-scev",
117 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
119 //===----------------------------------------------------------------------===//
120 // SCEV class definitions
121 //===----------------------------------------------------------------------===//
123 //===----------------------------------------------------------------------===//
124 // Implementation of the SCEV class.
128 void SCEV::dump() const {
133 void SCEV::print(raw_ostream &OS) const {
134 switch (static_cast<SCEVTypes>(getSCEVType())) {
136 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
139 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
140 const SCEV *Op = Trunc->getOperand();
141 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
142 << *Trunc->getType() << ")";
146 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
147 const SCEV *Op = ZExt->getOperand();
148 OS << "(zext " << *Op->getType() << " " << *Op << " to "
149 << *ZExt->getType() << ")";
153 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
154 const SCEV *Op = SExt->getOperand();
155 OS << "(sext " << *Op->getType() << " " << *Op << " to "
156 << *SExt->getType() << ")";
160 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
161 OS << "{" << *AR->getOperand(0);
162 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
163 OS << ",+," << *AR->getOperand(i);
165 if (AR->getNoWrapFlags(FlagNUW))
167 if (AR->getNoWrapFlags(FlagNSW))
169 if (AR->getNoWrapFlags(FlagNW) &&
170 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
172 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
180 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
181 const char *OpStr = nullptr;
182 switch (NAry->getSCEVType()) {
183 case scAddExpr: OpStr = " + "; break;
184 case scMulExpr: OpStr = " * "; break;
185 case scUMaxExpr: OpStr = " umax "; break;
186 case scSMaxExpr: OpStr = " smax "; break;
189 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
192 if (std::next(I) != E)
196 switch (NAry->getSCEVType()) {
199 if (NAry->getNoWrapFlags(FlagNUW))
201 if (NAry->getNoWrapFlags(FlagNSW))
207 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
208 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
212 const SCEVUnknown *U = cast<SCEVUnknown>(this);
214 if (U->isSizeOf(AllocTy)) {
215 OS << "sizeof(" << *AllocTy << ")";
218 if (U->isAlignOf(AllocTy)) {
219 OS << "alignof(" << *AllocTy << ")";
225 if (U->isOffsetOf(CTy, FieldNo)) {
226 OS << "offsetof(" << *CTy << ", ";
227 FieldNo->printAsOperand(OS, false);
232 // Otherwise just print it normally.
233 U->getValue()->printAsOperand(OS, false);
236 case scCouldNotCompute:
237 OS << "***COULDNOTCOMPUTE***";
240 llvm_unreachable("Unknown SCEV kind!");
243 Type *SCEV::getType() const {
244 switch (static_cast<SCEVTypes>(getSCEVType())) {
246 return cast<SCEVConstant>(this)->getType();
250 return cast<SCEVCastExpr>(this)->getType();
255 return cast<SCEVNAryExpr>(this)->getType();
257 return cast<SCEVAddExpr>(this)->getType();
259 return cast<SCEVUDivExpr>(this)->getType();
261 return cast<SCEVUnknown>(this)->getType();
262 case scCouldNotCompute:
263 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
265 llvm_unreachable("Unknown SCEV kind!");
268 bool SCEV::isZero() const {
269 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
270 return SC->getValue()->isZero();
274 bool SCEV::isOne() const {
275 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
276 return SC->getValue()->isOne();
280 bool SCEV::isAllOnesValue() const {
281 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
282 return SC->getValue()->isAllOnesValue();
286 /// isNonConstantNegative - Return true if the specified scev is negated, but not a constant.
288 bool SCEV::isNonConstantNegative() const {
289 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
290 if (!Mul) return false;
292 // If there is a constant factor, it will be first.
293 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
294 if (!SC) return false;
296 // Return true if the value is negative; this matches things like (-42 * V).
297 return SC->getAPInt().isNegative();
300 SCEVCouldNotCompute::SCEVCouldNotCompute() :
301 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
303 bool SCEVCouldNotCompute::classof(const SCEV *S) {
304 return S->getSCEVType() == scCouldNotCompute;
307 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
309 ID.AddInteger(scConstant);
312 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
313 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
314 UniqueSCEVs.InsertNode(S, IP);
318 const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
319 return getConstant(ConstantInt::get(getContext(), Val));
323 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
324 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
325 return getConstant(ConstantInt::get(ITy, V, isSigned));
328 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
329 unsigned SCEVTy, const SCEV *op, Type *ty)
330 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
332 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
333 const SCEV *op, Type *ty)
334 : SCEVCastExpr(ID, scTruncate, op, ty) {
335 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
336 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
337 "Cannot truncate non-integer value!");
340 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
341 const SCEV *op, Type *ty)
342 : SCEVCastExpr(ID, scZeroExtend, op, ty) {
343 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
344 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
345 "Cannot zero extend non-integer value!");
348 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
349 const SCEV *op, Type *ty)
350 : SCEVCastExpr(ID, scSignExtend, op, ty) {
351 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
352 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
353 "Cannot sign extend non-integer value!");
356 void SCEVUnknown::deleted() {
357 // Clear this SCEVUnknown from various maps.
358 SE->forgetMemoizedResults(this);
360 // Remove this SCEVUnknown from the uniquing map.
361 SE->UniqueSCEVs.RemoveNode(this);
363 // Release the value.
367 void SCEVUnknown::allUsesReplacedWith(Value *New) {
368 // Clear this SCEVUnknown from various maps.
369 SE->forgetMemoizedResults(this);
371 // Remove this SCEVUnknown from the uniquing map.
372 SE->UniqueSCEVs.RemoveNode(this);
374 // Update this SCEVUnknown to point to the new value. This is needed
375 // because there may still be outstanding SCEVs which still point to this SCEVUnknown.
380 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
381 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
382 if (VCE->getOpcode() == Instruction::PtrToInt)
383 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
384 if (CE->getOpcode() == Instruction::GetElementPtr &&
385 CE->getOperand(0)->isNullValue() &&
386 CE->getNumOperands() == 2)
387 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
389 AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
397 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
398 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
399 if (VCE->getOpcode() == Instruction::PtrToInt)
400 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
401 if (CE->getOpcode() == Instruction::GetElementPtr &&
402 CE->getOperand(0)->isNullValue()) {
404 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
405 if (StructType *STy = dyn_cast<StructType>(Ty))
406 if (!STy->isPacked() &&
407 CE->getNumOperands() == 3 &&
408 CE->getOperand(1)->isNullValue()) {
409 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
411 STy->getNumElements() == 2 &&
412 STy->getElementType(0)->isIntegerTy(1)) {
413 AllocTy = STy->getElementType(1);
422 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
423 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
424 if (VCE->getOpcode() == Instruction::PtrToInt)
425 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
426 if (CE->getOpcode() == Instruction::GetElementPtr &&
427 CE->getNumOperands() == 3 &&
428 CE->getOperand(0)->isNullValue() &&
429 CE->getOperand(1)->isNullValue()) {
431 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
432 // Ignore vector types here so that ScalarEvolutionExpander doesn't
433 // emit getelementptrs that index into vectors.
434 if (Ty->isStructTy() || Ty->isArrayTy()) {
436 FieldNo = CE->getOperand(2);
444 //===----------------------------------------------------------------------===//
446 //===----------------------------------------------------------------------===//
449 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
450 /// than the complexity of the RHS. This comparator is used to canonicalize expressions.
452 class SCEVComplexityCompare {
453 const LoopInfo *const LI;
455 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
457 // Return true if LHS is less than RHS, and false if LHS is at least RHS.
458 bool operator()(const SCEV *LHS, const SCEV *RHS) const {
459 return compare(LHS, RHS) < 0;
462 // Return negative, zero, or positive, if LHS is less than, equal to, or
463 // greater than RHS, respectively. A three-way result allows recursive
464 // comparisons to be more efficient.
465 int compare(const SCEV *LHS, const SCEV *RHS) const {
466 // Fast-path: SCEVs are uniqued so we can do a quick equality check.
470 // Primarily, sort the SCEVs by their getSCEVType().
471 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
473 return (int)LType - (int)RType;
475 // Aside from the getSCEVType() ordering, the particular ordering
476 // isn't very important except that it's beneficial to be consistent,
477 // so that (a + b) and (b + a) don't end up as different expressions.
478 switch (static_cast<SCEVTypes>(LType)) {
480 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
481 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
483 // Sort SCEVUnknown values with some loose heuristics. TODO: This is
484 // not as complete as it could be.
485 const Value *LV = LU->getValue(), *RV = RU->getValue();
487 // Order pointer values after integer values. This helps SCEVExpander form GEPs.
489 bool LIsPointer = LV->getType()->isPointerTy(),
490 RIsPointer = RV->getType()->isPointerTy();
491 if (LIsPointer != RIsPointer)
492 return (int)LIsPointer - (int)RIsPointer;
494 // Compare getValueID values.
495 unsigned LID = LV->getValueID(),
496 RID = RV->getValueID();
498 return (int)LID - (int)RID;
500 // Sort arguments by their position.
501 if (const Argument *LA = dyn_cast<Argument>(LV)) {
502 const Argument *RA = cast<Argument>(RV);
503 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
504 return (int)LArgNo - (int)RArgNo;
507 // For instructions, compare their loop depth, and their operand
508 // count. This is pretty loose.
509 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
510 const Instruction *RInst = cast<Instruction>(RV);
512 // Compare loop depths.
513 const BasicBlock *LParent = LInst->getParent(),
514 *RParent = RInst->getParent();
515 if (LParent != RParent) {
516 unsigned LDepth = LI->getLoopDepth(LParent),
517 RDepth = LI->getLoopDepth(RParent);
518 if (LDepth != RDepth)
519 return (int)LDepth - (int)RDepth;
522 // Compare the number of operands.
523 unsigned LNumOps = LInst->getNumOperands(),
524 RNumOps = RInst->getNumOperands();
525 return (int)LNumOps - (int)RNumOps;
532 const SCEVConstant *LC = cast<SCEVConstant>(LHS);
533 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
535 // Compare constant values.
536 const APInt &LA = LC->getAPInt();
537 const APInt &RA = RC->getAPInt();
538 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
539 if (LBitWidth != RBitWidth)
540 return (int)LBitWidth - (int)RBitWidth;
541 return LA.ult(RA) ? -1 : 1;
545 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
546 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
548 // Compare addrec loop depths.
549 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
550 if (LLoop != RLoop) {
551 unsigned LDepth = LLoop->getLoopDepth(),
552 RDepth = RLoop->getLoopDepth();
553 if (LDepth != RDepth)
554 return (int)LDepth - (int)RDepth;
557 // Addrec complexity grows with operand count.
558 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
559 if (LNumOps != RNumOps)
560 return (int)LNumOps - (int)RNumOps;
562 // Lexicographically compare.
563 for (unsigned i = 0; i != LNumOps; ++i) {
564 long X = compare(LA->getOperand(i), RA->getOperand(i));
576 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
577 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
579 // Lexicographically compare n-ary expressions.
580 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
581 if (LNumOps != RNumOps)
582 return (int)LNumOps - (int)RNumOps;
584 for (unsigned i = 0; i != LNumOps; ++i) {
587 long X = compare(LC->getOperand(i), RC->getOperand(i));
591 return (int)LNumOps - (int)RNumOps;
595 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
596 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
598 // Lexicographically compare udiv expressions.
599 long X = compare(LC->getLHS(), RC->getLHS());
602 return compare(LC->getRHS(), RC->getRHS());
608 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
609 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
611 // Compare cast expressions by operand.
612 return compare(LC->getOperand(), RC->getOperand());
615 case scCouldNotCompute:
616 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
618 llvm_unreachable("Unknown SCEV kind!");
621 } // end anonymous namespace
623 /// GroupByComplexity - Given a list of SCEV objects, order them by their
624 /// complexity, and group objects of the same complexity together by value.
625 /// When this routine is finished, we know that any duplicates in the vector are
626 /// consecutive and that complexity is monotonically increasing.
628 // Note that we take special precautions to ensure that we get deterministic
629 // results from this routine. In other words, we don't want the results of
630 // this to depend on where the addresses of various SCEV objects happened to end up in memory.
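// For example, the operand list (%x, 2, %x) is reordered to (2, %x, %x): the
// constant sorts first because it has the lowest complexity, and the two
// occurrences of %x become adjacent so callers can fold the duplicates.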
633 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
635 if (Ops.size() < 2) return; // Noop
636 if (Ops.size() == 2) {
637 // This is the common case, which also happens to be trivially simple.
639 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
640 if (SCEVComplexityCompare(LI)(RHS, LHS))
645 // Do the rough sort by complexity.
646 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
648 // Now that we are sorted by complexity, group elements of the same
649 // complexity. Note that this is, at worst, N^2, but the vector is likely to
650 // be extremely short in practice. Note that we take this approach because we
651 // do not want to depend on the addresses of the objects we are grouping.
652 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
653 const SCEV *S = Ops[i];
654 unsigned Complexity = S->getSCEVType();
656 // If there are any objects of the same complexity and same value as this one, group them.
658 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
659 if (Ops[j] == S) { // Found a duplicate.
660 // Move it to immediately after i'th element.
661 std::swap(Ops[i+1], Ops[j]);
662 ++i; // no need to rescan it.
663 if (i == e-2) return; // Done!
669 // Returns the size of the SCEV S.
670 static inline int sizeOfSCEV(const SCEV *S) {
671 struct FindSCEVSize {
673 FindSCEVSize() : Size(0) {}
675 bool follow(const SCEV *S) {
677 // Keep looking at all operands of S.
680 bool isDone() const {
686 SCEVTraversal<FindSCEVSize> ST(F);
693 struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
695 // Computes the Quotient and Remainder of the division of Numerator by Denominator.
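// For example, dividing the recurrence {0,+,4} by the constant 4 produces the
// quotient {0,+,1} and a remainder of 0, while dividing the constant 7 by the
// constant 2 produces a quotient of 3 and a remainder of 1.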
697 static void divide(ScalarEvolution &SE, const SCEV *Numerator,
698 const SCEV *Denominator, const SCEV **Quotient,
699 const SCEV **Remainder) {
700 assert(Numerator && Denominator && "Uninitialized SCEV");
702 SCEVDivision D(SE, Numerator, Denominator);
704 // Check for the trivial case here to avoid having to check for it in the rest of the code.
706 if (Numerator == Denominator) {
712 if (Numerator->isZero()) {
718 // A simple case when N/1. The quotient is N.
719 if (Denominator->isOne()) {
720 *Quotient = Numerator;
725 // Split the Denominator when it is a product.
726 if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
728 *Quotient = Numerator;
729 for (const SCEV *Op : T->operands()) {
730 divide(SE, *Quotient, Op, &Q, &R);
733 // Bail out when the Numerator is not divisible by one of the terms of the Denominator.
737 *Remainder = Numerator;
746 *Quotient = D.Quotient;
747 *Remainder = D.Remainder;
750 // Except in the trivial case described above, we do not know how to divide
751 // Expr by Denominator for the following functions with empty implementation.
752 void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
753 void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
754 void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
755 void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
756 void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
757 void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
758 void visitUnknown(const SCEVUnknown *Numerator) {}
759 void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
761 void visitConstant(const SCEVConstant *Numerator) {
762 if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
763 APInt NumeratorVal = Numerator->getAPInt();
764 APInt DenominatorVal = D->getAPInt();
765 uint32_t NumeratorBW = NumeratorVal.getBitWidth();
766 uint32_t DenominatorBW = DenominatorVal.getBitWidth();
768 if (NumeratorBW > DenominatorBW)
769 DenominatorVal = DenominatorVal.sext(NumeratorBW);
770 else if (NumeratorBW < DenominatorBW)
771 NumeratorVal = NumeratorVal.sext(DenominatorBW);
773 APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
774 APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
775 APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
776 Quotient = SE.getConstant(QuotientVal);
777 Remainder = SE.getConstant(RemainderVal);
782 void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
783 const SCEV *StartQ, *StartR, *StepQ, *StepR;
784 if (!Numerator->isAffine())
785 return cannotDivide(Numerator);
786 divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
787 divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
788 // Bail out if the types do not match.
789 Type *Ty = Denominator->getType();
790 if (Ty != StartQ->getType() || Ty != StartR->getType() ||
791 Ty != StepQ->getType() || Ty != StepR->getType())
792 return cannotDivide(Numerator);
793 Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
794 Numerator->getNoWrapFlags());
795 Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
796 Numerator->getNoWrapFlags());
799 void visitAddExpr(const SCEVAddExpr *Numerator) {
800 SmallVector<const SCEV *, 2> Qs, Rs;
801 Type *Ty = Denominator->getType();
803 for (const SCEV *Op : Numerator->operands()) {
805 divide(SE, Op, Denominator, &Q, &R);
807 // Bail out if types do not match.
808 if (Ty != Q->getType() || Ty != R->getType())
809 return cannotDivide(Numerator);
815 if (Qs.size() == 1) {
821 Quotient = SE.getAddExpr(Qs);
822 Remainder = SE.getAddExpr(Rs);
825 void visitMulExpr(const SCEVMulExpr *Numerator) {
826 SmallVector<const SCEV *, 2> Qs;
827 Type *Ty = Denominator->getType();
829 bool FoundDenominatorTerm = false;
830 for (const SCEV *Op : Numerator->operands()) {
831 // Bail out if types do not match.
832 if (Ty != Op->getType())
833 return cannotDivide(Numerator);
835 if (FoundDenominatorTerm) {
840 // Check whether Denominator divides one of the product operands.
842 divide(SE, Op, Denominator, &Q, &R);
848 // Bail out if types do not match.
849 if (Ty != Q->getType())
850 return cannotDivide(Numerator);
852 FoundDenominatorTerm = true;
856 if (FoundDenominatorTerm) {
861 Quotient = SE.getMulExpr(Qs);
865 if (!isa<SCEVUnknown>(Denominator))
866 return cannotDivide(Numerator);
868 // The Remainder is obtained by replacing Denominator by 0 in Numerator.
869 ValueToValueMap RewriteMap;
870 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
871 cast<SCEVConstant>(Zero)->getValue();
872 Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
874 if (Remainder->isZero()) {
875 // The Quotient is obtained by replacing Denominator by 1 in Numerator.
876 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
877 cast<SCEVConstant>(One)->getValue();
879 SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
883 // Quotient is (Numerator - Remainder) divided by Denominator.
885 const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
886 // This SCEV does not seem to simplify: fail the division here.
887 if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
888 return cannotDivide(Numerator);
889 divide(SE, Diff, Denominator, &Q, &R);
891 return cannotDivide(Numerator);
896 SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
897 const SCEV *Denominator)
898 : SE(S), Denominator(Denominator) {
899 Zero = SE.getZero(Denominator->getType());
900 One = SE.getOne(Denominator->getType());
902 // We generally do not know how to divide Expr by Denominator. We
903 // initialize the division to a "cannot divide" state to simplify the rest of the code.
905 cannotDivide(Numerator);
908 // Convenience function for giving up on the division. We set the quotient to
909 // be equal to zero and the remainder to be equal to the numerator.
910 void cannotDivide(const SCEV *Numerator) {
912 Remainder = Numerator;
916 const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
921 //===----------------------------------------------------------------------===//
922 // Simple SCEV method implementations
923 //===----------------------------------------------------------------------===//
925 /// BinomialCoefficient - Compute BC(It, K). The result has width W. Assumes K > 0.
927 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
930 // Handle the simplest case efficiently.
932 return SE.getTruncateOrZeroExtend(It, ResultTy);
934 // We are using the following formula for BC(It, K):
936 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
938 // Suppose W is the bitwidth of the return value. We must be prepared for
939 // overflow. Hence, we must assure that the result of our computation is
940 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
941 // safe in modular arithmetic.
943 // However, this code doesn't use exactly that formula; the formula it uses
944 // is something like the following, where T is the number of factors of 2 in
945 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is exponentiation:
948 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
950 // This formula is trivially equivalent to the previous formula. However,
951 // this formula can be implemented much more efficiently. The trick is that
952 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
953 // arithmetic. To do exact division in modular arithmetic, all we have
954 // to do is multiply by the inverse. Therefore, this step can be done at width W.
957 // The next issue is how to safely do the division by 2^T. The way this
958 // is done is by doing the multiplication step at a width of at least W + T
959 // bits. This way, the bottom W+T bits of the product are accurate. Then,
960 // when we perform the division by 2^T (which is equivalent to a right shift
961 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
962 // truncated out after the division by 2^T.
964 // In comparison to just directly using the first formula, this technique
965 // is much more efficient; using the first formula requires W * K bits,
966 // but this formula needs less than W + K bits. Also, the first formula requires
967 // a division step, whereas this formula only requires multiplies and shifts.
969 // It doesn't matter whether the subtraction step is done in the calculation
970 // width or the input iteration count's width; if the subtraction overflows,
971 // the result must be zero anyway. We prefer here to do it in the width of
972 // the induction variable because it helps a lot for certain cases; CodeGen
973 // isn't smart enough to ignore the overflow, which leads to much less
974 // efficient code if the width of the subtraction is wider than the native machine width.
977 // (It's possible to not widen at all by pulling out factors of 2 before
978 // the multiplication; for example, K=2 can be calculated as
979 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
980 // extra arithmetic, so it's not an obvious win, and it gets
981 // much more complicated for K > 3.)
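// For example, for K = 3 we have K! = 6 = 2^1 * 3, so T = 1 and the odd part
// K! / 2^T is 3. The product It*(It-1)*(It-2) is formed at width W+1, divided
// by 2^T with a udiv, truncated back to W bits, and finally multiplied by the
// multiplicative inverse of 3 modulo 2^W.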
983 // Protection from insane SCEVs; this bound is conservative,
984 // but it probably doesn't matter.
986 return SE.getCouldNotCompute();
988 unsigned W = SE.getTypeSizeInBits(ResultTy);
990 // Calculate K! / 2^T and T; we divide out the factors of two before
991 // multiplying for calculating K! / 2^T to avoid overflow.
992 // Other overflow doesn't matter because we only care about the bottom
993 // W bits of the result.
994 APInt OddFactorial(W, 1);
996 for (unsigned i = 3; i <= K; ++i) {
998 unsigned TwoFactors = Mult.countTrailingZeros();
1000 Mult = Mult.lshr(TwoFactors);
1001 OddFactorial *= Mult;
1004 // We need at least W + T bits for the multiplication step
1005 unsigned CalculationBits = W + T;
1007 // Calculate 2^T, at width T+W.
1008 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
1010 // Calculate the multiplicative inverse of K! / 2^T;
1011 // this multiplication factor will perform the exact division by K! / 2^T.
1013 APInt Mod = APInt::getSignedMinValue(W+1);
1014 APInt MultiplyFactor = OddFactorial.zext(W+1);
1015 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
1016 MultiplyFactor = MultiplyFactor.trunc(W);
1018 // Calculate the product, at width T+W
1019 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
1021 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
1022 for (unsigned i = 1; i != K; ++i) {
1023 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
1024 Dividend = SE.getMulExpr(Dividend,
1025 SE.getTruncateOrZeroExtend(S, CalculationTy));
1029 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
1031 // Truncate the result, and divide by K! / 2^T.
1033 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
1034 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
1037 /// evaluateAtIteration - Return the value of this chain of recurrences at
1038 /// the specified iteration number. We can evaluate this recurrence by
1039 /// multiplying each element in the chain by the binomial coefficient
1040 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
1042 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
1044 /// where BC(It, k) stands for binomial coefficient.
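/// For example, the affine recurrence {A,+,B} evaluates to A + B*It, and
/// {0,+,1,+,1} evaluates to It + It*(It-1)/2, since BC(It, 1) = It and
/// BC(It, 2) = It*(It-1)/2.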
1046 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
1047 ScalarEvolution &SE) const {
1048 const SCEV *Result = getStart();
1049 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1050 // The computation is correct in the face of overflow provided that the
1051 // multiplication is performed _after_ the evaluation of the binomial coefficient.
1053 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
1054 if (isa<SCEVCouldNotCompute>(Coeff))
1057 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
1062 //===----------------------------------------------------------------------===//
1063 // SCEV Expression folder implementations
1064 //===----------------------------------------------------------------------===//
1066 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
1068 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1069 "This is not a truncating conversion!");
1070 assert(isSCEVable(Ty) &&
1071 "This is not a conversion to a SCEVable type!");
1072 Ty = getEffectiveSCEVType(Ty);
1074 FoldingSetNodeID ID;
1075 ID.AddInteger(scTruncate);
1079 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1081 // Fold if the operand is constant.
1082 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1084 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
1086 // trunc(trunc(x)) --> trunc(x)
1087 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
1088 return getTruncateExpr(ST->getOperand(), Ty);
1090 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
1091 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1092 return getTruncateOrSignExtend(SS->getOperand(), Ty);
1094 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
1095 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1096 return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
1098 // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
1099 // eliminate all the truncates, or we replace other casts with truncates.
1100 if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
1101 SmallVector<const SCEV *, 4> Operands;
1102 bool hasTrunc = false;
1103 for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
1104 const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
1105 if (!isa<SCEVCastExpr>(SA->getOperand(i)))
1106 hasTrunc = isa<SCEVTruncateExpr>(S);
1107 Operands.push_back(S);
1110 return getAddExpr(Operands);
1111 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1114 // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
1115 // eliminate all the truncates, or we replace other casts with truncates.
1116 if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
1117 SmallVector<const SCEV *, 4> Operands;
1118 bool hasTrunc = false;
1119 for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
1120 const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
1121 if (!isa<SCEVCastExpr>(SM->getOperand(i)))
1122 hasTrunc = isa<SCEVTruncateExpr>(S);
1123 Operands.push_back(S);
1126 return getMulExpr(Operands);
1127 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
1130 // If the input value is a chrec scev, truncate the chrec's operands.
1131 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1132 SmallVector<const SCEV *, 4> Operands;
1133 for (const SCEV *Op : AddRec->operands())
1134 Operands.push_back(getTruncateExpr(Op, Ty));
1135 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
1138 // The cast wasn't folded; create an explicit cast node. We can reuse
1139 // the existing insert position since if we get here, we won't have
1140 // made any changes which would invalidate it.
1141 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
1143 UniqueSCEVs.InsertNode(S, IP);
1147 // Get the limit of a recurrence such that incrementing by Step cannot cause
1148 // signed overflow as long as the value of the recurrence within the
1149 // loop does not exceed this limit before incrementing.
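// For example, for an i8 recurrence with a constant Step of 4, Pred is
// ICMP_SLT and the limit is -128 - 4, i.e. 124 in 8-bit arithmetic: any value
// that is signed-less-than 124 can be incremented by 4 without signed overflow.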
1150 static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
1151 ICmpInst::Predicate *Pred,
1152 ScalarEvolution *SE) {
1153 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1154 if (SE->isKnownPositive(Step)) {
1155 *Pred = ICmpInst::ICMP_SLT;
1156 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1157 SE->getSignedRange(Step).getSignedMax());
1159 if (SE->isKnownNegative(Step)) {
1160 *Pred = ICmpInst::ICMP_SGT;
1161 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1162 SE->getSignedRange(Step).getSignedMin());
1167 // Get the limit of a recurrence such that incrementing by Step cannot cause
1168 // unsigned overflow as long as the value of the recurrence within the loop does
1169 // not exceed this limit before incrementing.
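// For example, for an i8 recurrence with a constant Step of 4, Pred is
// ICMP_ULT and the limit is 0 - 4, i.e. 252 in 8-bit arithmetic: any value
// that is unsigned-less-than 252 can be incremented by 4 without unsigned wrap.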
1170 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
1171 ICmpInst::Predicate *Pred,
1172 ScalarEvolution *SE) {
1173 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1174 *Pred = ICmpInst::ICMP_ULT;
1176 return SE->getConstant(APInt::getMinValue(BitWidth) -
1177 SE->getUnsignedRange(Step).getUnsignedMax());
1182 struct ExtendOpTraitsBase {
1183 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
1186 // Used to make code generic over signed and unsigned overflow.
1187 template <typename ExtendOp> struct ExtendOpTraits {
1190 // static const SCEV::NoWrapFlags WrapType;
1192 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1194 // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1195 // ICmpInst::Predicate *Pred,
1196 // ScalarEvolution *SE);
1200 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1201 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1203 static const GetExtendExprTy GetExtendExpr;
1205 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1206 ICmpInst::Predicate *Pred,
1207 ScalarEvolution *SE) {
1208 return getSignedOverflowLimitForStep(Step, Pred, SE);
1212 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1213 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
1216 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1217 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1219 static const GetExtendExprTy GetExtendExpr;
1221 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1222 ICmpInst::Predicate *Pred,
1223 ScalarEvolution *SE) {
1224 return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1228 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1229 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
1232 // The recurrence AR has been shown to have no signed/unsigned wrap or something
1233 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
1234 // easily prove NSW/NUW for its preincrement or postincrement sibling. This
1235 // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step +
1236 // Start),+,Step} => {(Step + sext/zext(Start)),+,Step}. As a result, the
1237 // expression "Step + sext/zext(PreIncAR)" is congruent with
1238 // "sext/zext(PostIncAR)"
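// For example, for the recurrence {1 + %x,+,1}, PreStart is %x; if any of the
// three proofs below succeeds, sext({1 + %x,+,1}) can be built with the start
// expression (1 + sext(%x)) rather than sext(1 + %x).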
1239 template <typename ExtendOpTy>
1240 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1241 ScalarEvolution *SE) {
1242 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1243 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1245 const Loop *L = AR->getLoop();
1246 const SCEV *Start = AR->getStart();
1247 const SCEV *Step = AR->getStepRecurrence(*SE);
1249 // Check for a simple looking step prior to loop entry.
1250 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1254 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1255 // subtraction is expensive. For this purpose, perform a quick and dirty
1256 // difference, by checking for Step in the operand list.
1257 SmallVector<const SCEV *, 4> DiffOps;
1258 for (const SCEV *Op : SA->operands())
1260 DiffOps.push_back(Op);
1262 if (DiffOps.size() == SA->getNumOperands())
1265 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + `Step`:
1268 // 1. NSW/NUW flags on the step increment.
1269 auto PreStartFlags =
1270 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
1271 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
1272 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1273 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1275 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1276 // "S+X does not sign/unsign-overflow".
1279 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1280 if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1281 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1284 // 2. Direct overflow check on the step operation's expression.
1285 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1286 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1287 const SCEV *OperandExtendedStart =
1288 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
1289 (SE->*GetExtendExpr)(Step, WideTy));
1290 if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
1291 if (PreAR && AR->getNoWrapFlags(WrapType)) {
1292 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1293 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1294 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1295 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
1300 // 3. Loop precondition.
1301 ICmpInst::Predicate Pred;
1302 const SCEV *OverflowLimit =
1303 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1305 if (OverflowLimit &&
1306 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
1312 // Get the normalized zero or sign extended expression for this AddRec's Start.
1313 template <typename ExtendOpTy>
1314 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1315 ScalarEvolution *SE) {
1316 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1318 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
1320 return (SE->*GetExtendExpr)(AR->getStart(), Ty);
1322 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
1323 (SE->*GetExtendExpr)(PreStart, Ty));
1326 // Try to prove away overflow by looking at "nearby" add recurrences. A
1327 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1328 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1332 // {S,+,X} == {S-T,+,X} + T
1333 // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1335 // If ({S-T,+,X} + T) does not overflow ... (1)
1337 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1339 // If {S-T,+,X} does not overflow ... (2)
1341 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1342 // == {Ext(S-T)+Ext(T),+,Ext(X)}
1344 // If (S-T)+T does not overflow ... (3)
1346 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1347 // == {Ext(S),+,Ext(X)} == LHS
1349 // Thus, if (1), (2) and (3) are true for some T, then
1350 // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1352 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1353 // does not overflow" restricted to the 0th iteration. Therefore we only need
1354 // to check for (1) and (2).
1356 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1357 // is `Delta` (defined below).
1359 template <typename ExtendOpTy>
1360 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1363 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1365 // We restrict `Start` to a constant to prevent SCEV from spending too much
1366 // time here. It is correct (but more expensive) to continue with a
1367 // non-constant `Start` and do a general SCEV subtraction to compute
1368 // `PreStart` below.
1370 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1374 APInt StartAI = StartC->getAPInt();
1376 for (unsigned Delta : {-2, -1, 1, 2}) {
1377 const SCEV *PreStart = getConstant(StartAI - Delta);
1379 FoldingSetNodeID ID;
1380 ID.AddInteger(scAddRecExpr);
1381 ID.AddPointer(PreStart);
1382 ID.AddPointer(Step);
1386 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1388 // Give up if we don't already have the add recurrence we need because
1389 // actually constructing an add recurrence is relatively expensive.
1390 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1391 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1392 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1393 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1394 DeltaS, &Pred, this);
1395 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1403 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
1405 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1406 "This is not an extending conversion!");
1407 assert(isSCEVable(Ty) &&
1408 "This is not a conversion to a SCEVable type!");
1409 Ty = getEffectiveSCEVType(Ty);
1411 // Fold if the operand is constant.
1412 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1414 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1416 // zext(zext(x)) --> zext(x)
1417 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1418 return getZeroExtendExpr(SZ->getOperand(), Ty);
1420 // Before doing any expensive analysis, check to see if we've already
1421 // computed a SCEV for this Op and Ty.
1422 FoldingSetNodeID ID;
1423 ID.AddInteger(scZeroExtend);
1427 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1429 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1430 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1431 // It's possible the bits taken off by the truncate were all zero bits. If
1432 // so, we should be able to simplify this further.
1433 const SCEV *X = ST->getOperand();
1434 ConstantRange CR = getUnsignedRange(X);
1435 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1436 unsigned NewBits = getTypeSizeInBits(Ty);
1437 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1438 CR.zextOrTrunc(NewBits)))
1439 return getTruncateOrZeroExtend(X, Ty);
1442 // If the input value is a chrec scev, and we can prove that the value
1443 // did not overflow the old, smaller, value, we can zero extend all of the
1444 // operands (often constants). This allows analysis of something like
1445 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1446 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1447 if (AR->isAffine()) {
1448 const SCEV *Start = AR->getStart();
1449 const SCEV *Step = AR->getStepRecurrence(*this);
1450 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1451 const Loop *L = AR->getLoop();
1453 // If we have special knowledge that this addrec won't overflow,
1454 // we don't need to do any further analysis.
1455 if (AR->getNoWrapFlags(SCEV::FlagNUW))
1456 return getAddRecExpr(
1457 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1458 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1460 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1461 // Note that this serves two purposes: It filters out loops that are
1462 // simply not analyzable, and it covers the case where this code is
1463 // being called from within backedge-taken count analysis, such that
1464 // attempting to ask for the backedge-taken count would likely result
1465 // in infinite recursion. In the latter case, the analysis code will
1466 // cope with a conservative value, and it will take care to purge
1467 // that value once it has finished.
1468 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1469 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1470 // Manually compute the final value for AR, checking for overflow.
1473 // Check whether the backedge-taken count can be losslessly cast to
1474 // the addrec's type. The count is always unsigned.
1475 const SCEV *CastedMaxBECount =
1476 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1477 const SCEV *RecastedMaxBECount =
1478 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1479 if (MaxBECount == RecastedMaxBECount) {
1480 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1481 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1482 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
1483 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
1484 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
1485 const SCEV *WideMaxBECount =
1486 getZeroExtendExpr(CastedMaxBECount, WideTy);
1487 const SCEV *OperandExtendedAdd =
1488 getAddExpr(WideStart,
1489 getMulExpr(WideMaxBECount,
1490 getZeroExtendExpr(Step, WideTy)));
1491 if (ZAdd == OperandExtendedAdd) {
1492 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1493 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1494 // Return the expression with the addrec on the outside.
1495 return getAddRecExpr(
1496 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1497 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1499 // Similar to above, only this time treat the step value as signed.
1500 // This covers loops that count down.
1501 OperandExtendedAdd =
1502 getAddExpr(WideStart,
1503 getMulExpr(WideMaxBECount,
1504 getSignExtendExpr(Step, WideTy)));
1505 if (ZAdd == OperandExtendedAdd) {
1506 // Cache knowledge of AR NW, which is propagated to this AddRec.
1507 // Negative step causes unsigned wrap, but it still can't self-wrap.
1508 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1509 // Return the expression with the addrec on the outside.
1510 return getAddRecExpr(
1511 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1512 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1516 // If the backedge is guarded by a comparison with the pre-inc value
1517 // the addrec is safe. Also, if the entry is guarded by a comparison
1518 // with the start value and the backedge is guarded by a comparison
1519 // with the post-inc value, the addrec is safe.
1520 if (isKnownPositive(Step)) {
1521 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1522 getUnsignedRange(Step).getUnsignedMax());
1523 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1524 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
1525 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
1526 AR->getPostIncExpr(*this), N))) {
1527 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1528 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1529 // Return the expression with the addrec on the outside.
1530 return getAddRecExpr(
1531 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1532 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1534 } else if (isKnownNegative(Step)) {
1535 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1536 getSignedRange(Step).getSignedMin());
1537 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1538 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
1539 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
1540 AR->getPostIncExpr(*this), N))) {
1541 // Cache knowledge of AR NW, which is propagated to this AddRec.
1542 // Negative step causes unsigned wrap, but it still can't self-wrap.
1543 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1544 // Return the expression with the addrec on the outside.
1545 return getAddRecExpr(
1546 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1547 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1552 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1553 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1554 return getAddRecExpr(
1555 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1556 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1560 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1561 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1562 if (SA->getNoWrapFlags(SCEV::FlagNUW)) {
1563 // If the addition does not unsign overflow then we can, by definition,
1564 // commute the zero extension with the addition operation.
1565 SmallVector<const SCEV *, 4> Ops;
1566 for (const auto *Op : SA->operands())
1567 Ops.push_back(getZeroExtendExpr(Op, Ty));
1568 return getAddExpr(Ops, SCEV::FlagNUW);
1572 // The cast wasn't folded; create an explicit cast node.
1573 // Recompute the insert position, as it may have been invalidated.
1574 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1575 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1577 UniqueSCEVs.InsertNode(S, IP);
1581 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1583 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1584 "This is not an extending conversion!");
1585 assert(isSCEVable(Ty) &&
1586 "This is not a conversion to a SCEVable type!");
1587 Ty = getEffectiveSCEVType(Ty);
1589 // Fold if the operand is constant.
1590 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1592 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1594 // sext(sext(x)) --> sext(x)
1595 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1596 return getSignExtendExpr(SS->getOperand(), Ty);
1598 // sext(zext(x)) --> zext(x)
1599 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1600 return getZeroExtendExpr(SZ->getOperand(), Ty);
1602 // Before doing any expensive analysis, check to see if we've already
1603 // computed a SCEV for this Op and Ty.
1604 FoldingSetNodeID ID;
1605 ID.AddInteger(scSignExtend);
1609 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1611 // If the input value is provably positive, build a zext instead.
1612 if (isKnownNonNegative(Op))
1613 return getZeroExtendExpr(Op, Ty);
1615 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1616 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1617 // It's possible the bits taken off by the truncate were all sign bits. If
1618 // so, we should be able to simplify this further.
1619 const SCEV *X = ST->getOperand();
1620 ConstantRange CR = getSignedRange(X);
1621 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1622 unsigned NewBits = getTypeSizeInBits(Ty);
1623 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1624 CR.sextOrTrunc(NewBits)))
1625 return getTruncateOrSignExtend(X, Ty);
1628 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
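// (e.g. sext(1 + (4 * %x)) --> 1 + sext(4 * %x), since 1 and 4 are positive
// and 4 is a power of two that is unsigned-greater-than 1)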
1629 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1630 if (SA->getNumOperands() == 2) {
1631 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
1632 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
1634 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
1635 const APInt &C1 = SC1->getAPInt();
1636 const APInt &C2 = SC2->getAPInt();
1637 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
1638 C2.ugt(C1) && C2.isPowerOf2())
1639 return getAddExpr(getSignExtendExpr(SC1, Ty),
1640 getSignExtendExpr(SMul, Ty));
1645 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1646 if (SA->getNoWrapFlags(SCEV::FlagNSW)) {
1647 // If the addition does not sign overflow then we can, by definition,
1648 // commute the sign extension with the addition operation.
1649 SmallVector<const SCEV *, 4> Ops;
1650 for (const auto *Op : SA->operands())
1651 Ops.push_back(getSignExtendExpr(Op, Ty));
1652 return getAddExpr(Ops, SCEV::FlagNSW);
1655 // If the input value is a chrec scev, and we can prove that the value
1656 // did not overflow the old, smaller, value, we can sign extend all of the
1657 // operands (often constants). This allows analysis of something like
1658 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1659 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1660 if (AR->isAffine()) {
1661 const SCEV *Start = AR->getStart();
1662 const SCEV *Step = AR->getStepRecurrence(*this);
1663 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1664 const Loop *L = AR->getLoop();
1666 // If we have special knowledge that this addrec won't overflow,
1667 // we don't need to do any further analysis.
1668 if (AR->getNoWrapFlags(SCEV::FlagNSW))
1669 return getAddRecExpr(
1670 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1671 getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
1673 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1674 // Note that this serves two purposes: It filters out loops that are
1675 // simply not analyzable, and it covers the case where this code is
1676 // being called from within backedge-taken count analysis, such that
1677 // attempting to ask for the backedge-taken count would likely result
1678       // in infinite recursion. In the latter case, the analysis code will
1679 // cope with a conservative value, and it will take care to purge
1680 // that value once it has finished.
1681 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1682 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1683         // Manually compute the final value for AR, checking for overflow.
1686         // Check whether the backedge-taken count can be losslessly cast to
1687 // the addrec's type. The count is always unsigned.
1688 const SCEV *CastedMaxBECount =
1689 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1690 const SCEV *RecastedMaxBECount =
1691 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1692 if (MaxBECount == RecastedMaxBECount) {
1693 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1694 // Check whether Start+Step*MaxBECount has no signed overflow.
1695 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1696 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1697 const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1698 const SCEV *WideMaxBECount =
1699 getZeroExtendExpr(CastedMaxBECount, WideTy);
1700 const SCEV *OperandExtendedAdd =
1701 getAddExpr(WideStart,
1702 getMulExpr(WideMaxBECount,
1703 getSignExtendExpr(Step, WideTy)));
1704 if (SAdd == OperandExtendedAdd) {
1705 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1706 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1707 // Return the expression with the addrec on the outside.
1708 return getAddRecExpr(
1709 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1710 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1712 // Similar to above, only this time treat the step value as unsigned.
1713 // This covers loops that count up with an unsigned step.
1714 OperandExtendedAdd =
1715 getAddExpr(WideStart,
1716 getMulExpr(WideMaxBECount,
1717 getZeroExtendExpr(Step, WideTy)));
1718 if (SAdd == OperandExtendedAdd) {
1719 // If AR wraps around then
1721 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
1722 // => SAdd != OperandExtendedAdd
1724 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
1725 // (SAdd == OperandExtendedAdd => AR is NW)
1727 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1729 // Return the expression with the addrec on the outside.
1730 return getAddRecExpr(
1731 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1732 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1736 // If the backedge is guarded by a comparison with the pre-inc value
1737 // the addrec is safe. Also, if the entry is guarded by a comparison
1738 // with the start value and the backedge is guarded by a comparison
1739 // with the post-inc value, the addrec is safe.
1740 ICmpInst::Predicate Pred;
1741 const SCEV *OverflowLimit =
1742 getSignedOverflowLimitForStep(Step, &Pred, this);
1743 if (OverflowLimit &&
1744 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1745 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1746 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1748 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1749 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1750 return getAddRecExpr(
1751 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1752 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1755       // If Start and Step are constants, check if we can apply this transformation:
1757 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
1758 auto *SC1 = dyn_cast<SCEVConstant>(Start);
1759 auto *SC2 = dyn_cast<SCEVConstant>(Step);
1761 const APInt &C1 = SC1->getAPInt();
1762 const APInt &C2 = SC2->getAPInt();
1763 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1765 Start = getSignExtendExpr(Start, Ty);
1766 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
1767 AR->getNoWrapFlags());
1768 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
1772 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1773 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1774 return getAddRecExpr(
1775 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1776 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1780 // The cast wasn't folded; create an explicit cast node.
1781 // Recompute the insert position, as it may have been invalidated.
1782 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1783 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1785 UniqueSCEVs.InsertNode(S, IP);
1789 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1790 /// unspecified bits out to the given type.
1792 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1794 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1795 "This is not an extending conversion!");
1796 assert(isSCEVable(Ty) &&
1797 "This is not a conversion to a SCEVable type!");
1798 Ty = getEffectiveSCEVType(Ty);
1800 // Sign-extend negative constants.
1801 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1802 if (SC->getAPInt().isNegative())
1803 return getSignExtendExpr(Op, Ty);
1805 // Peel off a truncate cast.
1806 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1807 const SCEV *NewOp = T->getOperand();
1808 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1809 return getAnyExtendExpr(NewOp, Ty);
1810 return getTruncateOrNoop(NewOp, Ty);
1813 // Next try a zext cast. If the cast is folded, use it.
1814 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1815 if (!isa<SCEVZeroExtendExpr>(ZExt))
1818 // Next try a sext cast. If the cast is folded, use it.
1819 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1820 if (!isa<SCEVSignExtendExpr>(SExt))
1823 // Force the cast to be folded into the operands of an addrec.
1824 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1825 SmallVector<const SCEV *, 4> Ops;
1826 for (const SCEV *Op : AR->operands())
1827 Ops.push_back(getAnyExtendExpr(Op, Ty));
1828 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1831 // If the expression is obviously signed, use the sext cast value.
1832 if (isa<SCEVSMaxExpr>(Op))
1835 // Absent any other information, use the zext cast value.
1839 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1840 /// a list of operands to be added under the given scale, and update the given
1841 /// map. This is a helper function for getAddExpr. As an example of
1842 /// what it does, given a sequence of operands that would form an add
1843 /// expression like this:
1845 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1847 /// where A and B are constants, update the map with these values:
1849 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1851 /// and add 13 + A*B*29 to AccumulatedConstant.
1852 /// This will allow getAddExpr to produce this:
1854 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1856 /// This form often exposes folding opportunities that are hidden in
1857 /// the original operand list.
1859 /// Return true iff it appears that any interesting folding opportunities
1860 /// may be exposed. This helps getAddExpr short-circuit extra work in
1861 /// the common case where no interesting opportunities are present, and
1862 /// is also used as a check to avoid infinite recursion.
1865 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1866 SmallVectorImpl<const SCEV *> &NewOps,
1867 APInt &AccumulatedConstant,
1868 const SCEV *const *Ops, size_t NumOperands,
1870 ScalarEvolution &SE) {
1871 bool Interesting = false;
1873 // Iterate over the add operands. They are sorted, with constants first.
1875 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1877 // Pull a buried constant out to the outside.
1878 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1880 AccumulatedConstant += Scale * C->getAPInt();
1883 // Next comes everything else. We're especially interested in multiplies
1884 // here, but they're in the middle, so just visit the rest with one loop.
1885 for (; i != NumOperands; ++i) {
1886 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1887 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1889 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
1890 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1891 // A multiplication of a constant with another add; recurse.
1892 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1894 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1895 Add->op_begin(), Add->getNumOperands(),
1898         // A multiplication of a constant with some other value. Update the map.
1900 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1901 const SCEV *Key = SE.getMulExpr(MulOps);
1902 auto Pair = M.insert(std::make_pair(Key, NewScale));
1904 NewOps.push_back(Pair.first->first);
1906 Pair.first->second += NewScale;
1907 // The map already had an entry for this value, which may indicate
1908 // a folding opportunity.
1913 // An ordinary operand. Update the map.
1914 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1915 M.insert(std::make_pair(Ops[i], Scale));
1917 NewOps.push_back(Pair.first->first);
1919 Pair.first->second += Scale;
1920 // The map already had an entry for this value, which may indicate
1921 // a folding opportunity.
1930 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
1931 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
1932 // can't-overflow flags for the operation if possible.
1933 static SCEV::NoWrapFlags
1934 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
1935 const SmallVectorImpl<const SCEV *> &Ops,
1936 SCEV::NoWrapFlags Flags) {
1937 using namespace std::placeholders;
1938 typedef OverflowingBinaryOperator OBO;
1941 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
1943 assert(CanAnalyze && "don't call from other places!");
1945 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1946 SCEV::NoWrapFlags SignOrUnsignWrap =
1947 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
1949 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1950 auto IsKnownNonNegative = [&](const SCEV *S) {
1951 return SE->isKnownNonNegative(S);
1954 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
1956 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1958 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
1960 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
1961 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {
1963 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow
1964 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow
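// For example, with C == 1 the only value of the other operand that
// sign-wraps is SIGNED_MAX (SIGNED_MAX + 1 overflows), so every other
// signed value lies inside the <nsw> region computed below.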
1966 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
1967 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
1969 ConstantRange::makeNoWrapRegion(Instruction::Add, C, OBO::NoSignedWrap);
1970 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
1971 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
1973 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
1975 ConstantRange::makeNoWrapRegion(Instruction::Add, C,
1976 OBO::NoUnsignedWrap);
1977 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
1978 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
1985 /// getAddExpr - Get a canonical add expression, or something simpler if possible.
1987 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1988 SCEV::NoWrapFlags Flags) {
1989 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1990 "only nuw or nsw allowed");
1991 assert(!Ops.empty() && "Cannot get empty add!");
1992 if (Ops.size() == 1) return Ops[0];
1994 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1995 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1996 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1997 "SCEVAddExpr operand types don't match!");
2000   // Sort by complexity; this groups all similar expression types together.
2001 GroupByComplexity(Ops, &LI);
2003 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2005 // If there are any constants, fold them together.
2007 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2009 assert(Idx < Ops.size());
2010 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2011 // We found two constants, fold them together!
2012 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2013 if (Ops.size() == 2) return Ops[0];
2014 Ops.erase(Ops.begin()+1); // Erase the folded element
2015 LHSC = cast<SCEVConstant>(Ops[0]);
2018 // If we are left with a constant zero being added, strip it off.
2019 if (LHSC->getValue()->isZero()) {
2020 Ops.erase(Ops.begin());
2024 if (Ops.size() == 1) return Ops[0];
2027 // Okay, check to see if the same value occurs in the operand list more than
2028   // once. If so, merge them together into a multiply expression. Since we
2029 // sorted the list, these values are required to be adjacent.
2030 Type *Ty = Ops[0]->getType();
2031 bool FoundMatch = false;
2032 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2033 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2034 // Scan ahead to count how many equal operands there are.
2036 while (i+Count != e && Ops[i+Count] == Ops[i])
2038 // Merge the values into a multiply.
2039 const SCEV *Scale = getConstant(Ty, Count);
2040 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
2041 if (Ops.size() == Count)
2044 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2045 --i; e -= Count - 1;
2049 return getAddExpr(Ops, Flags);
2051 // Check for truncates. If all the operands are truncated from the same
2052 // type, see if factoring out the truncate would permit the result to be
2053   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
2054 // if the contents of the resulting outer trunc fold to something simple.
2055 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
2056 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
2057 Type *DstType = Trunc->getType();
2058 Type *SrcType = Trunc->getOperand()->getType();
2059 SmallVector<const SCEV *, 8> LargeOps;
2061 // Check all the operands to see if they can be represented in the
2062 // source type of the truncate.
2063 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2064 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2065 if (T->getOperand()->getType() != SrcType) {
2069 LargeOps.push_back(T->getOperand());
2070 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2071 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2072 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2073 SmallVector<const SCEV *, 8> LargeMulOps;
2074 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2075 if (const SCEVTruncateExpr *T =
2076 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2077 if (T->getOperand()->getType() != SrcType) {
2081 LargeMulOps.push_back(T->getOperand());
2082 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2083 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2090 LargeOps.push_back(getMulExpr(LargeMulOps));
2097 // Evaluate the expression in the larger type.
2098 const SCEV *Fold = getAddExpr(LargeOps, Flags);
2099 // If it folds to something simple, use it. Otherwise, don't.
2100 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2101 return getTruncateExpr(Fold, DstType);
2105 // Skip past any other cast SCEVs.
2106 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2110   // If there are add operands, they would be next.
2110 if (Idx < Ops.size()) {
2111 bool DeletedAdd = false;
2112 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2113       // If we have an add, expand the add operands onto the end of the operands list.
2115 Ops.erase(Ops.begin()+Idx);
2116 Ops.append(Add->op_begin(), Add->op_end());
2120 // If we deleted at least one add, we added operands to the end of the list,
2121 // and they are not necessarily sorted. Recurse to resort and resimplify
2122 // any operands we just acquired.
2124 return getAddExpr(Ops);
2127 // Skip over the add expression until we get to a multiply.
2128 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2131 // Check to see if there are any folding opportunities present with
2132 // operands multiplied by constant values.
2133 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2134 uint64_t BitWidth = getTypeSizeInBits(Ty);
2135 DenseMap<const SCEV *, APInt> M;
2136 SmallVector<const SCEV *, 8> NewOps;
2137 APInt AccumulatedConstant(BitWidth, 0);
2138 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2139 Ops.data(), Ops.size(),
2140 APInt(BitWidth, 1), *this)) {
2141 struct APIntCompare {
2142 bool operator()(const APInt &LHS, const APInt &RHS) const {
2143 return LHS.ult(RHS);
2147       // Some interesting folding opportunity is present, so it's worthwhile to
2148 // re-generate the operands list. Group the operands by constant scale,
2149 // to avoid multiplying by the same constant scale multiple times.
2150 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2151 for (const SCEV *NewOp : NewOps)
2152 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2153 // Re-generate the operands list.
2155 if (AccumulatedConstant != 0)
2156 Ops.push_back(getConstant(AccumulatedConstant));
2157 for (auto &MulOp : MulOpLists)
2158 if (MulOp.first != 0)
2159 Ops.push_back(getMulExpr(getConstant(MulOp.first),
2160 getAddExpr(MulOp.second)));
2163 if (Ops.size() == 1)
2165 return getAddExpr(Ops);
2169 // If we are adding something to a multiply expression, make sure the
2170   // something is not already an operand of the multiply. If so, merge it into the multiply.
2172 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2173 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2174 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2175 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2176 if (isa<SCEVConstant>(MulOpSCEV))
2178 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2179 if (MulOpSCEV == Ops[AddOp]) {
2180 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2181 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2182 if (Mul->getNumOperands() != 2) {
2183           // If the multiply has more than two operands, we must get the other operands.
2185 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2186 Mul->op_begin()+MulOp);
2187 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2188 InnerMul = getMulExpr(MulOps);
2190 const SCEV *One = getOne(Ty);
2191 const SCEV *AddOne = getAddExpr(One, InnerMul);
2192 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
2193 if (Ops.size() == 2) return OuterMul;
2195 Ops.erase(Ops.begin()+AddOp);
2196 Ops.erase(Ops.begin()+Idx-1);
2198 Ops.erase(Ops.begin()+Idx);
2199 Ops.erase(Ops.begin()+AddOp-1);
2201 Ops.push_back(OuterMul);
2202 return getAddExpr(Ops);
2205 // Check this multiply against other multiplies being added together.
2206 for (unsigned OtherMulIdx = Idx+1;
2207 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2209 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2210         // If MulOp occurs in OtherMul, we can fold the two multiplies together.
2212 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2213 OMulOp != e; ++OMulOp)
2214 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2215 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2216 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2217 if (Mul->getNumOperands() != 2) {
2218 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2219 Mul->op_begin()+MulOp);
2220 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2221 InnerMul1 = getMulExpr(MulOps);
2223 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2224 if (OtherMul->getNumOperands() != 2) {
2225 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2226 OtherMul->op_begin()+OMulOp);
2227 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2228 InnerMul2 = getMulExpr(MulOps);
2230 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
2231 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
2232 if (Ops.size() == 2) return OuterMul;
2233 Ops.erase(Ops.begin()+Idx);
2234 Ops.erase(Ops.begin()+OtherMulIdx-1);
2235 Ops.push_back(OuterMul);
2236 return getAddExpr(Ops);
2242 // If there are any add recurrences in the operands list, see if any other
2243   // added values are loop invariant. If so, we can fold them into the recurrence.
2245 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2248 // Scan over all recurrences, trying to fold loop invariants into them.
2249 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2250 // Scan all of the other operands to this add and add them to the vector if
2251 // they are loop invariant w.r.t. the recurrence.
2252 SmallVector<const SCEV *, 8> LIOps;
2253 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2254 const Loop *AddRecLoop = AddRec->getLoop();
2255 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2256 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2257 LIOps.push_back(Ops[i]);
2258 Ops.erase(Ops.begin()+i);
2262 // If we found some loop invariants, fold them into the recurrence.
2263 if (!LIOps.empty()) {
2264 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2265 LIOps.push_back(AddRec->getStart());
2267 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2269 AddRecOps[0] = getAddExpr(LIOps);
2271 // Build the new addrec. Propagate the NUW and NSW flags if both the
2272 // outer add and the inner addrec are guaranteed to have no overflow.
2273 // Always propagate NW.
2274 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2275 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2277 // If all of the other operands were loop invariant, we are done.
2278 if (Ops.size() == 1) return NewRec;
2280       // Otherwise, add the folded AddRec to the non-invariant parts.
2281 for (unsigned i = 0;; ++i)
2282 if (Ops[i] == AddRec) {
2286 return getAddExpr(Ops);
2289 // Okay, if there weren't any loop invariants to be folded, check to see if
2290 // there are multiple AddRec's with the same loop induction variable being
2291 // added together. If so, we can fold them.
2292 for (unsigned OtherIdx = Idx+1;
2293 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2295 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2296 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2297 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2299 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2301 if (const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2302 if (OtherAddRec->getLoop() == AddRecLoop) {
2303 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2305 if (i >= AddRecOps.size()) {
2306 AddRecOps.append(OtherAddRec->op_begin()+i,
2307 OtherAddRec->op_end());
2310 AddRecOps[i] = getAddExpr(AddRecOps[i],
2311 OtherAddRec->getOperand(i));
2313 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2315 // Step size has changed, so we cannot guarantee no self-wraparound.
2316 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2317 return getAddExpr(Ops);
2320     // Otherwise couldn't fold anything into this recurrence. Move on to the next one.
2324 // Okay, it looks like we really DO need an add expr. Check to see if we
2325 // already have one, otherwise create a new one.
2326 FoldingSetNodeID ID;
2327 ID.AddInteger(scAddExpr);
2328 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2329 ID.AddPointer(Ops[i]);
2332 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2334 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2335 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2336 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
2338 UniqueSCEVs.InsertNode(S, IP);
2340 S->setNoWrapFlags(Flags);
2344 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
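// Multiply i and j in 64 bits; flag Overflow when the product wrapped,
// detected by checking that dividing the product back by j recovers i.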
2346 if (j > 1 && k / j != i) Overflow = true;
2350 /// Compute the result of "n choose k", the binomial coefficient. If an
2351 /// intermediate computation overflows, Overflow will be set and the return will
2352 /// be garbage. Overflow is not cleared on absence of overflow.
2353 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2354 // We use the multiplicative formula:
2355 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2356   // At each iteration, we take the n-th term of the numerator and divide by the
2357 // (k-n)th term of the denominator. This division will always produce an
2358 // integral result, and helps reduce the chance of overflow in the
2359 // intermediate computations. However, we can still overflow even when the
2360 // final result would fit.
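  // For example, Choose(5, 2) == (5*4)/(1*2) == 10.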
2362 if (n == 0 || n == k) return 1;
2363 if (k > n) return 0;
2369 for (uint64_t i = 1; i <= k; ++i) {
2370 r = umul_ov(r, n-(i-1), Overflow);
2376 /// Determine if any of the operands in this SCEV are a constant or if
2377 /// any of the add or multiply expressions in this SCEV contain a constant.
2378 static bool containsConstantSomewhere(const SCEV *StartExpr) {
2379 SmallVector<const SCEV *, 4> Ops;
2380 Ops.push_back(StartExpr);
2381 while (!Ops.empty()) {
2382 const SCEV *CurrentExpr = Ops.pop_back_val();
2383 if (isa<SCEVConstant>(*CurrentExpr))
2386 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
2387 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
2388 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
2394 /// getMulExpr - Get a canonical multiply expression, or something simpler if possible.
2396 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2397 SCEV::NoWrapFlags Flags) {
2398 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2399 "only nuw or nsw allowed");
2400 assert(!Ops.empty() && "Cannot get empty mul!");
2401 if (Ops.size() == 1) return Ops[0];
2403 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2404 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2405 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2406 "SCEVMulExpr operand types don't match!");
2409   // Sort by complexity; this groups all similar expression types together.
2410 GroupByComplexity(Ops, &LI);
2412 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2414 // If there are any constants, fold them together.
2416 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2418 // C1*(C2+V) -> C1*C2 + C1*V
2419 if (Ops.size() == 2)
2420 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2421 // If any of Add's ops are Adds or Muls with a constant,
2422 // apply this transformation as well.
2423 if (Add->getNumOperands() == 2)
2424 if (containsConstantSomewhere(Add))
2425 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
2426 getMulExpr(LHSC, Add->getOperand(1)));
2429 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2430 // We found two constants, fold them together!
2432 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
2433 Ops[0] = getConstant(Fold);
2434 Ops.erase(Ops.begin()+1); // Erase the folded element
2435 if (Ops.size() == 1) return Ops[0];
2436 LHSC = cast<SCEVConstant>(Ops[0]);
2439 // If we are left with a constant one being multiplied, strip it off.
2440 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
2441 Ops.erase(Ops.begin());
2443 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2444 // If we have a multiply of zero, it will always be zero.
2446 } else if (Ops[0]->isAllOnesValue()) {
2447       // If we have a mul by -1 of an add, try distributing the -1 among the add operands.
2449 if (Ops.size() == 2) {
2450 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2451 SmallVector<const SCEV *, 4> NewOps;
2452 bool AnyFolded = false;
2453 for (const SCEV *AddOp : Add->operands()) {
2454 const SCEV *Mul = getMulExpr(Ops[0], AddOp);
2455 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2456 NewOps.push_back(Mul);
2459 return getAddExpr(NewOps);
2460 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2461 // Negation preserves a recurrence's no self-wrap property.
2462 SmallVector<const SCEV *, 4> Operands;
2463 for (const SCEV *AddRecOp : AddRec->operands())
2464 Operands.push_back(getMulExpr(Ops[0], AddRecOp));
2466 return getAddRecExpr(Operands, AddRec->getLoop(),
2467 AddRec->getNoWrapFlags(SCEV::FlagNW));
2472 if (Ops.size() == 1)
2476 // Skip over the add expression until we get to a multiply.
2477 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2480   // If there are mul operands, inline them all into this expression.
2481 if (Idx < Ops.size()) {
2482 bool DeletedMul = false;
2483 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2484       // If we have a mul, expand the mul operands onto the end of the operands list.
2486 Ops.erase(Ops.begin()+Idx);
2487 Ops.append(Mul->op_begin(), Mul->op_end());
2491 // If we deleted at least one mul, we added operands to the end of the list,
2492 // and they are not necessarily sorted. Recurse to resort and resimplify
2493 // any operands we just acquired.
2495 return getMulExpr(Ops);
2498 // If there are any add recurrences in the operands list, see if any other
2499   // added values are loop invariant. If so, we can fold them into the recurrence.
2501 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2504 // Scan over all recurrences, trying to fold loop invariants into them.
2505 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2506 // Scan all of the other operands to this mul and add them to the vector if
2507 // they are loop invariant w.r.t. the recurrence.
2508 SmallVector<const SCEV *, 8> LIOps;
2509 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2510 const Loop *AddRecLoop = AddRec->getLoop();
2511 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2512 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2513 LIOps.push_back(Ops[i]);
2514 Ops.erase(Ops.begin()+i);
2518 // If we found some loop invariants, fold them into the recurrence.
2519 if (!LIOps.empty()) {
2520 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2521 SmallVector<const SCEV *, 4> NewOps;
2522 NewOps.reserve(AddRec->getNumOperands());
2523 const SCEV *Scale = getMulExpr(LIOps);
2524 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2525 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2527 // Build the new addrec. Propagate the NUW and NSW flags if both the
2528 // outer mul and the inner addrec are guaranteed to have no overflow.
2530       // The no-self-wrap flag cannot be guaranteed after changing the step size, but
2531 // will be inferred if either NUW or NSW is true.
2532 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2533 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2535 // If all of the other operands were loop invariant, we are done.
2536 if (Ops.size() == 1) return NewRec;
2538 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2539 for (unsigned i = 0;; ++i)
2540 if (Ops[i] == AddRec) {
2544 return getMulExpr(Ops);
2547 // Okay, if there weren't any loop invariants to be folded, check to see if
2548 // there are multiple AddRec's with the same loop induction variable being
2549 // multiplied together. If so, we can fold them.
2551 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2552 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2553 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2554 // ]]],+,...up to x=2n}.
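  // For example, in the affine case:
  //   {A1,+,A2}<L> * {B1,+,B2}<L> = {A1*B1,+,A1*B2+A2*B1+A2*B2,+,2*A2*B2}<L>.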
2555 // Note that the arguments to choose() are always integers with values
2556 // known at compile time, never SCEV objects.
2558 // The implementation avoids pointless extra computations when the two
2559 // addrec's are of different length (mathematically, it's equivalent to
2560 // an infinite stream of zeros on the right).
2561 bool OpsModified = false;
2562 for (unsigned OtherIdx = Idx+1;
2563 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2565 const SCEVAddRecExpr *OtherAddRec =
2566 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2567 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2570 bool Overflow = false;
2571 Type *Ty = AddRec->getType();
2572 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2573 SmallVector<const SCEV*, 7> AddRecOps;
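      // Build the coefficients of the product recurrence one term at a time;
      // the result has (#operands(AddRec) + #operands(OtherAddRec) - 1) operands.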
2574 for (int x = 0, xe = AddRec->getNumOperands() +
2575 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2576 const SCEV *Term = getZero(Ty);
2577 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2578 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2579 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2580 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2581 z < ze && !Overflow; ++z) {
2582 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2584 if (LargerThan64Bits)
2585 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2587 Coeff = Coeff1*Coeff2;
2588 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2589 const SCEV *Term1 = AddRec->getOperand(y-z);
2590 const SCEV *Term2 = OtherAddRec->getOperand(z);
2591 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2594 AddRecOps.push_back(Term);
2597 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2599 if (Ops.size() == 2) return NewAddRec;
2600 Ops[Idx] = NewAddRec;
2601 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2603 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2609 return getMulExpr(Ops);
2611     // Otherwise couldn't fold anything into this recurrence. Move on to the next one.
2615   // Okay, it looks like we really DO need a mul expr. Check to see if we
2616 // already have one, otherwise create a new one.
2617 FoldingSetNodeID ID;
2618 ID.AddInteger(scMulExpr);
2619 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2620 ID.AddPointer(Ops[i]);
2623 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2625 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2626 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2627 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2629 UniqueSCEVs.InsertNode(S, IP);
2631 S->setNoWrapFlags(Flags);
2635 /// getUDivExpr - Get a canonical unsigned division expression, or something
2636 /// simpler if possible.
2637 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2639 assert(getEffectiveSCEVType(LHS->getType()) ==
2640 getEffectiveSCEVType(RHS->getType()) &&
2641 "SCEVUDivExpr operand types don't match!");
2643 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2644 if (RHSC->getValue()->equalsInt(1))
2645       return LHS;                               // X udiv 1 --> X
2646 // If the denominator is zero, the result of the udiv is undefined. Don't
2647 // try to analyze it, because the resolution chosen here may differ from
2648 // the resolution chosen in other parts of the compiler.
2649 if (!RHSC->getValue()->isZero()) {
2650 // Determine if the division can be folded into the operands of
2652 // TODO: Generalize this to non-constants by using known-bits information.
2653 Type *Ty = LHS->getType();
2654 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
2655 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
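      // For example, an i32 divisor of 8 has 28 leading zeros, so MaxShiftAmt
      // is 3, i.e. log2 of the divisor.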
2656 // For non-power-of-two values, effectively round the value up to the
2657 // nearest power of two.
2658 if (!RHSC->getAPInt().isPowerOf2())
2660 IntegerType *ExtTy =
2661 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2662 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2663 if (const SCEVConstant *Step =
2664 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2665 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2666 const APInt &StepInt = Step->getAPInt();
2667 const APInt &DivInt = RHSC->getAPInt();
2668 if (!StepInt.urem(DivInt) &&
2669 getZeroExtendExpr(AR, ExtTy) ==
2670 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2671 getZeroExtendExpr(Step, ExtTy),
2672 AR->getLoop(), SCEV::FlagAnyWrap)) {
2673 SmallVector<const SCEV *, 4> Operands;
2674 for (const SCEV *Op : AR->operands())
2675 Operands.push_back(getUDivExpr(Op, RHS));
2676 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
2678 /// Get a canonical UDivExpr for a recurrence.
2679 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2680 // We can currently only fold X%N if X is constant.
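          // For example, with N == 2 and C == 4: {5,+,2}/4 becomes {4,+,2}/4,
          // since Y = 5 - (5 % 2) == 4.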
2681 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2682 if (StartC && !DivInt.urem(StepInt) &&
2683 getZeroExtendExpr(AR, ExtTy) ==
2684 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2685 getZeroExtendExpr(Step, ExtTy),
2686 AR->getLoop(), SCEV::FlagAnyWrap)) {
2687 const APInt &StartInt = StartC->getAPInt();
2688 const APInt &StartRem = StartInt.urem(StepInt);
2690 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2691 AR->getLoop(), SCEV::FlagNW);
2694 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
2695 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2696 SmallVector<const SCEV *, 4> Operands;
2697 for (const SCEV *Op : M->operands())
2698 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
2699 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2700 // Find an operand that's safely divisible.
2701 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2702 const SCEV *Op = M->getOperand(i);
2703 const SCEV *Div = getUDivExpr(Op, RHSC);
2704 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2705 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2708 return getMulExpr(Operands);
2712 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2713 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
2714 SmallVector<const SCEV *, 4> Operands;
2715 for (const SCEV *Op : A->operands())
2716 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
2717 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2719 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2720 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2721 if (isa<SCEVUDivExpr>(Op) ||
2722 getMulExpr(Op, RHS) != A->getOperand(i))
2724 Operands.push_back(Op);
2726 if (Operands.size() == A->getNumOperands())
2727 return getAddExpr(Operands);
2731 // Fold if both operands are constant.
2732 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2733 Constant *LHSCV = LHSC->getValue();
2734 Constant *RHSCV = RHSC->getValue();
2735 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2741 FoldingSetNodeID ID;
2742 ID.AddInteger(scUDivExpr);
2746 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2747 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2749 UniqueSCEVs.InsertNode(S, IP);
2753 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
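  // Take the absolute values of the two constants, bring them to a common
  // bit width, and return their greatest common divisor.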
2754 APInt A = C1->getAPInt().abs();
2755 APInt B = C2->getAPInt().abs();
2756 uint32_t ABW = A.getBitWidth();
2757 uint32_t BBW = B.getBitWidth();
2764 return APIntOps::GreatestCommonDivisor(A, B);
2767 /// getUDivExactExpr - Get a canonical unsigned division expression, or
2768 /// something simpler if possible. There is no representation for an exact udiv
2769 /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
2770 /// We can't do this when it's not exact because the udiv may be clearing bits.
2771 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
2773 // TODO: we could try to find factors in all sorts of things, but for now we
2774 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
2775 // end of this file for inspiration.
2777 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
2779 return getUDivExpr(LHS, RHS);
2781 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
2782 // If the mulexpr multiplies by a constant, then that constant must be the
2783 // first element of the mulexpr.
2784 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2785 if (LHSCst == RHSCst) {
2786 SmallVector<const SCEV *, 2> Operands;
2787 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2788 return getMulExpr(Operands);
2791       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2792       // that there's a factor provided by one of the other terms. We need to check.
2794 APInt Factor = gcd(LHSCst, RHSCst);
2795 if (!Factor.isIntN(1)) {
2797 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
2799 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
2800 SmallVector<const SCEV *, 2> Operands;
2801 Operands.push_back(LHSCst);
2802 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2803 LHS = getMulExpr(Operands);
2805 Mul = dyn_cast<SCEVMulExpr>(LHS);
2807 return getUDivExactExpr(LHS, RHS);
2812 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2813 if (Mul->getOperand(i) == RHS) {
2814 SmallVector<const SCEV *, 2> Operands;
2815 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2816 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2817 return getMulExpr(Operands);
2821 return getUDivExpr(LHS, RHS);
2824 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2825 /// Simplify the expression as much as possible.
2826 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2828 SCEV::NoWrapFlags Flags) {
2829 SmallVector<const SCEV *, 4> Operands;
2830 Operands.push_back(Start);
2831 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2832 if (StepChrec->getLoop() == L) {
2833 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2834 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2837 Operands.push_back(Step);
2838 return getAddRecExpr(Operands, L, Flags);
2841 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2842 /// Simplify the expression as much as possible.
2844 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2845 const Loop *L, SCEV::NoWrapFlags Flags) {
2846 if (Operands.size() == 1) return Operands[0];
2848 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2849 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2850 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2851 "SCEVAddRecExpr operand types don't match!");
2852 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2853 assert(isLoopInvariant(Operands[i], L) &&
2854 "SCEVAddRecExpr operand is not loop-invariant!");
2857 if (Operands.back()->isZero()) {
2858 Operands.pop_back();
2859 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2862   // It's tempting to want to call getMaxBackedgeTakenCount here and
2863 // use that information to infer NUW and NSW flags. However, computing a
2864 // BE count requires calling getAddRecExpr, so we may not yet have a
2865 // meaningful BE count at this point (and if we don't, we'd be stuck
2866 // with a SCEVCouldNotCompute as the cached BE count).
2868 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2870   // Canonicalize nested AddRecs by nesting them in order of loop depth.
2871 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2872 const Loop *NestedLoop = NestedAR->getLoop();
2873 if (L->contains(NestedLoop)
2874 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
2875 : (!NestedLoop->contains(L) &&
2876 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
2877 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2878 NestedAR->op_end());
2879 Operands[0] = NestedAR->getStart();
2880 // AddRecs require their operands be loop-invariant with respect to their
2881       // loops. Don't perform this transformation if it would break this requirement.
2883 bool AllInvariant = all_of(
2884 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
2887 // Create a recurrence for the outer loop with the same step size.
2889 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2890 // inner recurrence has the same property.
2891 SCEV::NoWrapFlags OuterFlags =
2892 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2894 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2895 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
2896 return isLoopInvariant(Op, NestedLoop);
2900 // Ok, both add recurrences are valid after the transformation.
2902 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2903 // the outer recurrence has the same property.
2904 SCEV::NoWrapFlags InnerFlags =
2905 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2906 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2909 // Reset Operands to its original state.
2910 Operands[0] = NestedAR;
2914 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2915 // already have one, otherwise create a new one.
2916 FoldingSetNodeID ID;
2917 ID.AddInteger(scAddRecExpr);
2918 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2919 ID.AddPointer(Operands[i]);
2923 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2925 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2926 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2927 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2928 O, Operands.size(), L);
2929 UniqueSCEVs.InsertNode(S, IP);
2931 S->setNoWrapFlags(Flags);
2936 ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
2937 const SmallVectorImpl<const SCEV *> &IndexExprs,
2939 // getSCEV(Base)->getType() has the same address space as Base->getType()
2940 // because SCEV::getType() preserves the address space.
2941 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
2942 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
2943 // instruction to its SCEV, because the Instruction may be guarded by control
2944 // flow and the no-overflow bits may not be valid for the expression in any
2945   // context. This can be fixed similarly to how these flags are handled for adds.
2947 SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
2949 const SCEV *TotalOffset = getZero(IntPtrTy);
2950   // The address space is unimportant. The first thing we do with CurTy is get
2951 // its element type.
2952 Type *CurTy = PointerType::getUnqual(PointeeType);
2953 for (const SCEV *IndexExpr : IndexExprs) {
2954 // Compute the (potentially symbolic) offset in bytes for this index.
2955 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
2956 // For a struct, add the member offset.
2957 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
2958 unsigned FieldNo = Index->getZExtValue();
2959 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
2961 // Add the field offset to the running total offset.
2962 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
2964 // Update CurTy to the type of the field at Index.
2965 CurTy = STy->getTypeAtIndex(Index);
2967 // Update CurTy to its element type.
2968 CurTy = cast<SequentialType>(CurTy)->getElementType();
2969 // For an array, add the element offset, explicitly scaled.
2970 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
2971 // Getelementptr indices are signed.
2972 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
2974 // Multiply the index by the element size to compute the element offset.
2975 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
2977 // Add the element offset to the running total offset.
2978 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2982 // Add the total offset from all the GEP indices to the base.
2983 return getAddExpr(BaseExpr, TotalOffset, Wrap);
2986 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2988 SmallVector<const SCEV *, 2> Ops;
2991 return getSMaxExpr(Ops);
2995 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2996 assert(!Ops.empty() && "Cannot get empty smax!");
2997 if (Ops.size() == 1) return Ops[0];
2999 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3000 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3001 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3002 "SCEVSMaxExpr operand types don't match!");
3005   // Sort by complexity; this groups all similar expression types together.
3006 GroupByComplexity(Ops, &LI);
3008 // If there are any constants, fold them together.
3010 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3012 assert(Idx < Ops.size());
3013 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3014 // We found two constants, fold them together!
3015 ConstantInt *Fold = ConstantInt::get(
3016 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
3017 Ops[0] = getConstant(Fold);
3018 Ops.erase(Ops.begin()+1); // Erase the folded element
3019 if (Ops.size() == 1) return Ops[0];
3020 LHSC = cast<SCEVConstant>(Ops[0]);
3023 // If we are left with a constant minimum-int, strip it off.
3024 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
3025 Ops.erase(Ops.begin());
3027 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
3028     // If we have an smax with a constant maximum-int, the result is always maximum-int.
3033 if (Ops.size() == 1) return Ops[0];
3036 // Find the first SMax
3037 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
3040 // Check to see if one of the operands is an SMax. If so, expand its operands
3041 // onto our operand list, and recurse to simplify.
3042 if (Idx < Ops.size()) {
3043 bool DeletedSMax = false;
3044 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
3045 Ops.erase(Ops.begin()+Idx);
3046 Ops.append(SMax->op_begin(), SMax->op_end());
3051 return getSMaxExpr(Ops);
3054 // Okay, check to see if the same value occurs in the operand list twice. If
3055   // so, delete one. Since we sorted the list, these values are required to be adjacent.
3057 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3058 // X smax Y smax Y --> X smax Y
3059     //  X smax Y  -->  X, if X is always greater than or equal to Y
3060 if (Ops[i] == Ops[i+1] ||
3061 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
3062 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3064 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
3065 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3069 if (Ops.size() == 1) return Ops[0];
3071 assert(!Ops.empty() && "Reduced smax down to nothing!");
3073 // Okay, it looks like we really DO need an smax expr. Check to see if we
3074 // already have one, otherwise create a new one.
3075 FoldingSetNodeID ID;
3076 ID.AddInteger(scSMaxExpr);
3077 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3078 ID.AddPointer(Ops[i]);
3080 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3081 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3082 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3083 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
3085 UniqueSCEVs.InsertNode(S, IP);
3089 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
3091 SmallVector<const SCEV *, 2> Ops;
3094 return getUMaxExpr(Ops);
3098 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3099 assert(!Ops.empty() && "Cannot get empty umax!");
3100 if (Ops.size() == 1) return Ops[0];
3102 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3103 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3104 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3105 "SCEVUMaxExpr operand types don't match!");
3108   // Sort by complexity; this groups all similar expression types together.
3109 GroupByComplexity(Ops, &LI);
3111 // If there are any constants, fold them together.
3113 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3115 assert(Idx < Ops.size());
3116 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3117 // We found two constants, fold them together!
3118 ConstantInt *Fold = ConstantInt::get(
3119 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
3120 Ops[0] = getConstant(Fold);
3121 Ops.erase(Ops.begin()+1); // Erase the folded element
3122 if (Ops.size() == 1) return Ops[0];
3123 LHSC = cast<SCEVConstant>(Ops[0]);
3126 // If we are left with a constant minimum-int, strip it off.
3127 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
3128 Ops.erase(Ops.begin());
3130 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
3131     // If we have an umax with a constant maximum-int, the result is always maximum-int.
3136 if (Ops.size() == 1) return Ops[0];
3139 // Find the first UMax
3140 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
3143 // Check to see if one of the operands is a UMax. If so, expand its operands
3144 // onto our operand list, and recurse to simplify.
3145 if (Idx < Ops.size()) {
3146 bool DeletedUMax = false;
3147 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
3148 Ops.erase(Ops.begin()+Idx);
3149 Ops.append(UMax->op_begin(), UMax->op_end());
3154 return getUMaxExpr(Ops);
3157 // Okay, check to see if the same value occurs in the operand list twice. If
3158   // so, delete one. Since we sorted the list, these values are required to be adjacent.
3160 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3161 // X umax Y umax Y --> X umax Y
3162     //  X umax Y  -->  X, if X is always greater than or equal to Y
3163 if (Ops[i] == Ops[i+1] ||
3164 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
3165 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3167 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
3168 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3172 if (Ops.size() == 1) return Ops[0];
3174 assert(!Ops.empty() && "Reduced umax down to nothing!");
3176 // Okay, it looks like we really DO need a umax expr. Check to see if we
3177 // already have one, otherwise create a new one.
3178 FoldingSetNodeID ID;
3179 ID.AddInteger(scUMaxExpr);
3180 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3181 ID.AddPointer(Ops[i]);
3183 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3184 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3185 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3186 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
3188 UniqueSCEVs.InsertNode(S, IP);
3192 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3194 // ~smax(~x, ~y) == smin(x, y).
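  // (Bitwise NOT reverses the signed order: x s<= y iff ~x s>= ~y, and
  // ~~x == x, so smin falls out of smax.)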
3195 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3198 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3200 // ~umax(~x, ~y) == umin(x, y)
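  // (Bitwise NOT reverses the unsigned order as well, so the same argument
  // applies here.)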
3201 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3204 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3205 // We can bypass creating a target-independent
3206 // constant expression and then folding it back into a ConstantInt.
3207 // This is just a compile-time optimization.
3208 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3211 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3214 // We can bypass creating a target-independent
3215 // constant expression and then folding it back into a ConstantInt.
3216 // This is just a compile-time optimization.
3218 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3221 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3222 // Don't attempt to do anything other than create a SCEVUnknown object
3223 // here. createSCEV only calls getUnknown after checking for all other
3224 // interesting possibilities, and any other code that calls getUnknown
3225 // is doing so in order to hide a value from SCEV canonicalization.
3227 FoldingSetNodeID ID;
3228 ID.AddInteger(scUnknown);
3231 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3232 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3233 "Stale SCEVUnknown in uniquing map!");
3236 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3238 FirstUnknown = cast<SCEVUnknown>(S);
3239 UniqueSCEVs.InsertNode(S, IP);
3243 //===----------------------------------------------------------------------===//
3244 // Basic SCEV Analysis and PHI Idiom Recognition Code
3247 /// isSCEVable - Test if values of the given type are analyzable within
3248 /// the SCEV framework. This primarily includes integer types, and it
3249 /// can optionally include pointer types if the ScalarEvolution class
3250 /// has access to target-specific information.
3251 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3252 // Integers and pointers are always SCEVable.
3253 return Ty->isIntegerTy() || Ty->isPointerTy();
3256 /// getTypeSizeInBits - Return the size in bits of the specified type,
3257 /// for which isSCEVable must return true.
3258 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3259 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3260 return getDataLayout().getTypeSizeInBits(Ty);
3263 /// getEffectiveSCEVType - Return a type with the same bitwidth as
3264 /// the given type and which represents how SCEV will treat the given
3265 /// type, for which isSCEVable must return true. For pointer types,
3266 /// this is the pointer-sized integer type.
3267 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3268 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3270 if (Ty->isIntegerTy())
3274   // The only other supported type is pointer.
3274 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3275 return getDataLayout().getIntPtrType(Ty);
3278 const SCEV *ScalarEvolution::getCouldNotCompute() {
3279 return CouldNotCompute.get();
3283 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3284 // Helper class working with SCEVTraversal to figure out if a SCEV contains
3285 // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
3286 // is set iff we find such a SCEVUnknown.
3288 struct FindInvalidSCEVUnknown {
3290 FindInvalidSCEVUnknown() { FindOne = false; }
3291 bool follow(const SCEV *S) {
3292 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
3296 if (!cast<SCEVUnknown>(S)->getValue())
3303 bool isDone() const { return FindOne; }
3306 FindInvalidSCEVUnknown F;
3307 SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
3313 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
3314 /// expression and create a new one.
3315 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3316 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3318 const SCEV *S = getExistingSCEV(V);
3321 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
3326 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
3327 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3329 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3330 if (I != ValueExprMap.end()) {
3331 const SCEV *S = I->second;
3332 if (checkValidity(S))
3334 ValueExprMap.erase(I);
3339 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
3341 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
3342 SCEV::NoWrapFlags Flags) {
3343 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3345 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
3347 Type *Ty = V->getType();
3348 Ty = getEffectiveSCEVType(Ty);
3350 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
3353 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
3354 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
3355 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3357 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
3359 Type *Ty = V->getType();
3360 Ty = getEffectiveSCEVType(Ty);
3361 const SCEV *AllOnes =
3362 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
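  // In two's complement, ~V == -1 - V, and -1 is the all-ones value.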
3363 return getMinusSCEV(AllOnes, V);
3366 /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.