From: Dan Gohman <gohman@apple.com> Date: Fri, 9 Oct 2009 00:10:36 +0000 (+0000) Subject: Add the ability to track HasNSW and HasNUW on more kinds of SCEV expressions. X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=3645b01002e7ac244c1f3d163e5e350df21d869d;p=oota-llvm.git Add the ability to track HasNSW and HasNUW on more kinds of SCEV expressions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@83601 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index cb4d7c140d7..ed5d18eaf98 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -402,37 +402,45 @@ namespace llvm { const SCEV *getZeroExtendExpr(const SCEV *Op, const Type *Ty); const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty); const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty); - const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops); - const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS) { + const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops, + bool HasNUW = false, bool HasNSW = false); + const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS, + bool HasNUW = false, bool HasNSW = false) { SmallVector<const SCEV *, 2> Ops; Ops.push_back(LHS); Ops.push_back(RHS); - return getAddExpr(Ops); + return getAddExpr(Ops, HasNUW, HasNSW); } const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, - const SCEV *Op2) { + const SCEV *Op2, + bool HasNUW = false, bool HasNSW = false) { SmallVector<const SCEV *, 3> Ops; Ops.push_back(Op0); Ops.push_back(Op1); Ops.push_back(Op2); - return getAddExpr(Ops); + return getAddExpr(Ops, HasNUW, HasNSW); } - const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops); - const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS) { + const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops, + bool HasNUW = false, bool HasNSW = false); + const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS, + bool HasNUW = false, bool HasNSW = false) { SmallVector<const SCEV *, 2> Ops; Ops.push_back(LHS); Ops.push_back(RHS); - return 
getMulExpr(Ops); + return getMulExpr(Ops, HasNUW, HasNSW); } const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS); const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, - const Loop *L); + const Loop *L, + bool HasNUW = false, bool HasNSW = false); const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, - const Loop *L); + const Loop *L, + bool HasNUW = false, bool HasNSW = false); const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands, - const Loop *L) { + const Loop *L, + bool HasNUW = false, bool HasNSW = false) { SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end()); - return getAddRecExpr(NewOp, L); + return getAddRecExpr(NewOp, L, HasNUW, HasNSW); } const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS); const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands); diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h index 67f5e06977c..2c503506035 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpressions.h +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -234,6 +234,15 @@ namespace llvm { virtual const Type *getType() const { return getOperand(0)->getType(); } + bool hasNoUnsignedWrap() const { return SubclassData & (1 << 0); } + void setHasNoUnsignedWrap(bool B) { + SubclassData = (SubclassData & ~(1 << 0)) | (B << 0); + } + bool hasNoSignedWrap() const { return SubclassData & (1 << 1); } + void setHasNoSignedWrap(bool B) { + SubclassData = (SubclassData & ~(1 << 1)) | (B << 1); + } + /// Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const SCEVNAryExpr *S) { return true; } static inline bool classof(const SCEV *S) { @@ -436,15 +445,6 @@ namespace llvm { return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE))); } - bool hasNoUnsignedWrap() const { return SubclassData & (1 << 0); } - void setHasNoUnsignedWrap(bool B) { - SubclassData = (SubclassData & ~(1 << 0)) | (B << 0); - } - bool hasNoSignedWrap() const { return SubclassData & 
(1 << 1); } - void setHasNoSignedWrap(bool B) { - SubclassData = (SubclassData & ~(1 << 1)) | (B << 1); - } - virtual void print(raw_ostream &OS) const; /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -464,6 +464,9 @@ namespace llvm { SCEVSMaxExpr(const FoldingSetNodeID &ID, const SmallVectorImpl<const SCEV *> &ops) : SCEVCommutativeExpr(ID, scSMaxExpr, ops) { + // Max never overflows. + setHasNoUnsignedWrap(true); + setHasNoSignedWrap(true); } public: @@ -486,6 +489,9 @@ namespace llvm { SCEVUMaxExpr(const FoldingSetNodeID &ID, const SmallVectorImpl<const SCEV *> &ops) : SCEVCommutativeExpr(ID, scUMaxExpr, ops) { + // Max never overflows. + setHasNoUnsignedWrap(true); + setHasNoSignedWrap(true); } public: diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 12ad4297ef6..9300de11685 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -1191,7 +1191,8 @@ namespace { /// getAddExpr - Get a canonical add expression, or something simpler if /// possible. -const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) { +const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, + bool HasNUW, bool HasNSW) { assert(!Ops.empty() && "Cannot get empty add!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1241,7 +1242,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) { return Mul; Ops.erase(Ops.begin()+i, Ops.begin()+i+2); Ops.push_back(Mul); - return getAddExpr(Ops); + return getAddExpr(Ops, HasNUW, HasNSW); } // Check for truncates. If all the operands are truncated from the same @@ -1296,7 +1297,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) { } if (Ok) { // Evaluate the expression in the larger type. - const SCEV *Fold = getAddExpr(LargeOps); + const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW); // If it folds to something simple, use it. Otherwise, don't. 
if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) return getTruncateExpr(Fold, DstType); @@ -1516,16 +1517,19 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) { ID.AddPointer(Ops[i]); void *IP = 0; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; - SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>(); + SCEVAddExpr *S = SCEVAllocator.Allocate<SCEVAddExpr>(); new (S) SCEVAddExpr(ID, Ops); UniqueSCEVs.InsertNode(S, IP); + if (HasNUW) S->setHasNoUnsignedWrap(true); + if (HasNSW) S->setHasNoSignedWrap(true); return S; } /// getMulExpr - Get a canonical multiply expression, or something simpler if /// possible. -const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) { +const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, + bool HasNUW, bool HasNSW) { assert(!Ops.empty() && "Cannot get empty mul!"); #ifndef NDEBUG for (unsigned i = 1, e = Ops.size(); i != e; ++i) @@ -1688,9 +1692,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) { ID.AddPointer(Ops[i]); void *IP = 0; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; - SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>(); + SCEVMulExpr *S = SCEVAllocator.Allocate<SCEVMulExpr>(); new (S) SCEVMulExpr(ID, Ops); UniqueSCEVs.InsertNode(S, IP); + if (HasNUW) S->setHasNoUnsignedWrap(true); + if (HasNSW) S->setHasNoSignedWrap(true); return S; } @@ -1797,7 +1803,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. 
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, - const SCEV *Step, const Loop *L) { + const SCEV *Step, const Loop *L, + bool HasNUW, bool HasNSW) { SmallVector<const SCEV *, 4> Operands; Operands.push_back(Start); if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) @@ -1808,14 +1815,15 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, } Operands.push_back(Step); - return getAddRecExpr(Operands, L); + return getAddRecExpr(Operands, L, HasNUW, HasNSW); } /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. const SCEV * ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, - const Loop *L) { + const Loop *L, + bool HasNUW, bool HasNSW) { if (Operands.size() == 1) return Operands[0]; #ifndef NDEBUG for (unsigned i = 1, e = Operands.size(); i != e; ++i) @@ -1826,7 +1834,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, if (Operands.back()->isZero()) { Operands.pop_back(); - return getAddRecExpr(Operands, L); // {X,+,0} --> X + return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X } // Canonicalize nested AddRecs in by nesting them in order of loop depth. @@ -1855,7 +1863,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, } if (AllInvariant) // Ok, both add recurrences are valid after the transformation. - return getAddRecExpr(NestedOperands, NestedLoop); + return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW); } // Reset Operands to its original state. Operands[0] = NestedAR; @@ -1870,9 +1878,11 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, ID.AddPointer(L); void *IP = 0; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; - SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>(); + SCEVAddRecExpr *S = SCEVAllocator.Allocate<SCEVAddRecExpr>(); new (S) SCEVAddRecExpr(ID, Operands, L); UniqueSCEVs.InsertNode(S, IP); + if (HasNUW) S->setHasNoUnsignedWrap(true); + if (HasNSW) S->setHasNoSignedWrap(true); return S; }