X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FAnalysis%2FScalarEvolution.cpp;h=3c67a348fe84f735f705b93845411732f197e699;hp=a06321cecfd7d727582ebc02ed7c6a345422031e;hb=d13db2c59cc94162d6cf0a04187d408bfef6d4a7;hpb=1cd9275c8d612bd1c92fc7ba436b60aaead1efbf

diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index a06321cecfd..3c67a348fe8 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -103,8 +103,8 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                                  "derived loop"),
                    cl::init(100));
 
-static RegisterPass<ScalarEvolution>
-R("scalar-evolution", "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
+                "Scalar Evolution Analysis", false, true);
 char ScalarEvolution::ID = 0;
 
 //===----------------------------------------------------------------------===//
@@ -141,7 +141,7 @@ bool SCEV::isAllOnesValue() const {
 }
 
 SCEVCouldNotCompute::SCEVCouldNotCompute() :
-  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}
+  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
 
 bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
   llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
@@ -177,8 +177,7 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
   ID.AddPointer(V);
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
-  new (S) SCEVConstant(ID, V);
+  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -189,8 +188,8 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
 
 const SCEV *
 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
-  return getConstant(
-    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
+  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+  return getConstant(ConstantInt::get(ITy, V, isSigned));
 }
 
 const Type *SCEVConstant::getType() const { return V->getType(); }
@@ -199,7 +198,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
   WriteAsOperand(OS, V, false);
 }
 
-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                            unsigned SCEVTy, const SCEV *op, const Type *ty)
   : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
 
@@ -211,11 +210,11 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
   return Op->properlyDominates(BB, DT);
 }
 
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                    const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scTruncate, op, ty) {
-  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
          "Cannot truncate non-integer value!");
 }
 
@@ -223,11 +222,11 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
   OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }
 
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                        const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scZeroExtend, op, ty) {
-  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
          "Cannot zero extend non-integer value!");
 }
 
@@ -235,11 +234,11 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
   OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }
 
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                        const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scSignExtend, op, ty) {
-  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
          "Cannot sign extend non-integer value!");
 }
 
@@ -248,11 +247,13 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
 }
 
 void SCEVCommutativeExpr::print(raw_ostream &OS) const {
-  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
   const char *OpStr = getOperationStr();
-  OS << "(" << *Operands[0];
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
-    OS << OpStr << *Operands[i];
+  OS << "(";
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+    OS << **I;
+    if (next(I) != E)
+      OS << OpStr;
+  }
   OS << ")";
 }
 
@@ -312,24 +313,30 @@ bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
   return true;
 }
 
+bool
+SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
+  return DT->dominates(L->getHeader(), BB) &&
+         SCEVNAryExpr::dominates(BB, DT);
+}
+
+bool
+SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
+  // This uses a "dominates" query instead of "properly dominates" query because
+  // the instruction which produces the addrec's value is a PHI, and a PHI
+  // effectively properly dominates its entire containing block.
+  return DT->dominates(L->getHeader(), BB) &&
+         SCEVNAryExpr::properlyDominates(BB, DT);
+}
+
 void SCEVAddRecExpr::print(raw_ostream &OS) const {
   OS << "{" << *Operands[0];
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+  for (unsigned i = 1, e = NumOperands; i != e; ++i)
     OS << ",+," << *Operands[i];
   OS << "}<";
   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
   OS << ">";
 }
 
-void SCEVFieldOffsetExpr::print(raw_ostream &OS) const {
-  // LLVM struct fields don't have names, so just print the field number.
-  OS << "offsetof(" << *STy << ", " << FieldNo << ")";
-}
-
-void SCEVAllocSizeExpr::print(raw_ostream &OS) const {
-  OS << "sizeof(" << *AllocTy << ")";
-}
-
 bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
   // All non-instruction values are loop invariant.  All instructions are loop
   // invariant if they are not contained in the specified loop.
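[Editor's note: the hunks above move SCEV node creation from the two-step Allocate()/placement-new pattern over a FoldingSetNodeID to a single arena allocation keyed by an interned FoldingSetNodeIDRef. The standalone C++ sketch below illustrates the underlying hash-consing idea: each distinct node is allocated once in an arena and looked up by a profile key, so pointer equality implies structural equality. It is an illustration only, not LLVM code; the names Arena, UniqueTable, and make are hypothetical.]

#include <cstdio>
#include <map>
#include <memory>
#include <utility>
#include <vector>

struct Node {
  int Kind;
  long Value;
};

class Arena {
  std::vector<std::unique_ptr<Node> > Storage; // owns every node for its lifetime
public:
  Node *allocate(int Kind, long Value) {
    Storage.push_back(std::unique_ptr<Node>(new Node{Kind, Value}));
    return Storage.back().get();
  }
};

class UniqueTable {
  std::map<std::pair<int, long>, Node *> Table; // profile key -> unique node
  Arena &A;
public:
  explicit UniqueTable(Arena &Arena_) : A(Arena_) {}
  // Return the unique node for (Kind, Value), creating it on first use.
  Node *make(int Kind, long Value) {
    std::pair<int, long> Key(Kind, Value);
    std::map<std::pair<int, long>, Node *>::iterator It = Table.find(Key);
    if (It != Table.end())
      return It->second;               // hit: like FindNodeOrInsertPos
    Node *N = A.allocate(Kind, Value); // miss: like new (SCEVAllocator) ...
    Table[Key] = N;
    return N;
  }
};

int main() {
  Arena A;
  UniqueTable U(A);
  Node *X = U.make(0, 42);
  Node *Y = U.make(0, 42);
  // Identical profiles yield the same node, so == is structural equality.
  std::printf("same node: %s\n", X == Y ? "yes" : "no");
  return 0;
}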
@@ -356,7 +363,91 @@ const Type *SCEVUnknown::getType() const {
   return V->getType();
 }
 
+bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+    if (VCE->getOpcode() == Instruction::PtrToInt)
+      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+        if (CE->getOpcode() == Instruction::GetElementPtr &&
+            CE->getOperand(0)->isNullValue() &&
+            CE->getNumOperands() == 2)
+          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
+            if (CI->isOne()) {
+              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
+                                 ->getElementType();
+              return true;
+            }
+
+  return false;
+}
+
+bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+    if (VCE->getOpcode() == Instruction::PtrToInt)
+      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+        if (CE->getOpcode() == Instruction::GetElementPtr &&
+            CE->getOperand(0)->isNullValue()) {
+          const Type *Ty =
+            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+          if (const StructType *STy = dyn_cast<StructType>(Ty))
+            if (!STy->isPacked() &&
+                CE->getNumOperands() == 3 &&
+                CE->getOperand(1)->isNullValue()) {
+              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
+                if (CI->isOne() &&
+                    STy->getNumElements() == 2 &&
+                    STy->getElementType(0)->isIntegerTy(1)) {
+                  AllocTy = STy->getElementType(1);
+                  return true;
+                }
+            }
+        }
+
+  return false;
+}
+
+bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+    if (VCE->getOpcode() == Instruction::PtrToInt)
+      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+        if (CE->getOpcode() == Instruction::GetElementPtr &&
+            CE->getNumOperands() == 3 &&
+            CE->getOperand(0)->isNullValue() &&
+            CE->getOperand(1)->isNullValue()) {
+          const Type *Ty =
+            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+          // Ignore vector types here so that ScalarEvolutionExpander doesn't
+          // emit getelementptrs that index into vectors.
+          if (Ty->isStructTy() || Ty->isArrayTy()) {
+            CTy = Ty;
+            FieldNo = CE->getOperand(2);
+            return true;
+          }
+        }
+
+  return false;
+}
+
 void SCEVUnknown::print(raw_ostream &OS) const {
+  const Type *AllocTy;
+  if (isSizeOf(AllocTy)) {
+    OS << "sizeof(" << *AllocTy << ")";
+    return;
+  }
+  if (isAlignOf(AllocTy)) {
+    OS << "alignof(" << *AllocTy << ")";
+    return;
+  }
+
+  const Type *CTy;
+  Constant *FieldNo;
+  if (isOffsetOf(CTy, FieldNo)) {
+    OS << "offsetof(" << *CTy << ", ";
+    WriteAsOperand(OS, FieldNo, false);
+    OS << ")";
+    return;
+  }
+
+  // Otherwise just print it normally.
   WriteAsOperand(OS, V, false);
 }
 
@@ -428,9 +519,9 @@ namespace {
 
       // Order pointer values after integer values. This helps SCEVExpander
       // form GEPs.
-      if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
+      if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
         return false;
-      if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
+      if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
         return true;
 
       // Compare getValueID values.
@@ -515,21 +606,6 @@ namespace {
         return operator()(LC->getOperand(), RC->getOperand());
       }
 
-      // Compare offsetof expressions.
-      if (const SCEVFieldOffsetExpr *LA = dyn_cast<SCEVFieldOffsetExpr>(LHS)) {
-        const SCEVFieldOffsetExpr *RA = cast<SCEVFieldOffsetExpr>(RHS);
-        if (CompareTypes(LA->getStructType(), RA->getStructType()) ||
-            CompareTypes(RA->getStructType(), LA->getStructType()))
-          return CompareTypes(LA->getStructType(), RA->getStructType());
-        return LA->getFieldNo() < RA->getFieldNo();
-      }
-
-      // Compare sizeof expressions by the allocation type.
-      if (const SCEVAllocSizeExpr *LA = dyn_cast<SCEVAllocSizeExpr>(LHS)) {
-        const SCEVAllocSizeExpr *RA = cast<SCEVAllocSizeExpr>(RHS);
-        return CompareTypes(LA->getAllocType(), RA->getAllocType());
-      }
-
       llvm_unreachable("Unknown SCEV kind!");
       return false;
     }
@@ -541,7 +617,7 @@ namespace {
 /// When this routine is finished, we know that any duplicates in the vector are
 /// consecutive and that complexity is monotonically increasing.
 ///
-/// Note that we go take special precautions to ensure that we get determinstic
+/// Note that we go take special precautions to ensure that we get deterministic
 /// results from this routine.  In other words, we don't want the results of
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
@@ -669,7 +745,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
 
   // We need at least W + T bits for the multiplication step
   unsigned CalculationBits = W + T;
 
-  // Calcuate 2^T, at width T+W.
+  // Calculate 2^T, at width T+W.
   APInt DivFactor = APInt(CalculationBits, 1).shl(T);
 
   // Calculate the multiplicative inverse of K! / 2^T;
@@ -685,7 +761,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       CalculationBits);
   const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
   for (unsigned i = 1; i != K; ++i) {
-    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
+    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
     Dividend = SE.getMulExpr(Dividend,
                              SE.getTruncateOrZeroExtend(S, CalculationTy));
   }
@@ -746,7 +822,8 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
   // Fold if the operand is constant.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
     return getConstant(
-      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
+      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
+                                               getEffectiveSCEVType(Ty))));
 
   // trunc(trunc(x)) --> trunc(x)
   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
@@ -768,11 +845,18 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
     return getAddRecExpr(Operands, AddRec->getLoop());
   }
 
-  // The cast wasn't folded; create an explicit cast node.
-  // Recompute the insert position, as it may have been invalidated.
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
-  new (S) SCEVTruncateExpr(ID, Op, Ty);
+  // As a special case, fold trunc(undef) to undef. We don't want to
+  // know too much about SCEVUnknowns, but this special case is handy
+  // and harmless.
+  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+    if (isa<UndefValue>(U->getValue()))
+      return getSCEV(UndefValue::get(Ty));
+
+  // The cast wasn't folded; create an explicit cast node. We can reuse
+  // the existing insert position since if we get here, we won't have
+  // made any changes which would invalidate it.
+  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
+                                                 Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -786,12 +870,10 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = getEffectiveSCEVType(Ty);
-    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
-    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
-    return getConstant(cast<ConstantInt>(C));
-  }
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+    return getConstant(
+      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
+                                              getEffectiveSCEVType(Ty))));
 
   // zext(zext(x)) --> zext(x)
   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -846,9 +928,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
       if (MaxBECount == RecastedMaxBECount) {
         const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
         // Check whether Start+Step*MaxBECount has no unsigned overflow.
-        const SCEV *ZMul =
-          getMulExpr(CastedMaxBECount,
-                     getTruncateOrZeroExtend(Step, Start->getType()));
+        const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
         const SCEV *Add = getAddExpr(Start, ZMul);
         const SCEV *OperandExtendedAdd =
           getAddExpr(getZeroExtendExpr(Start, WideTy),
@@ -862,9 +942,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
 
         // Similar to above, only this time treat the step value as signed.
         // This covers loops that count down.
-        const SCEV *SMul =
-          getMulExpr(CastedMaxBECount,
-                     getTruncateOrSignExtend(Step, Start->getType()));
+        const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
         Add = getAddExpr(Start, SMul);
         OperandExtendedAdd =
           getAddExpr(getZeroExtendExpr(Start, WideTy),
@@ -885,7 +963,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
           const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                       getUnsignedRange(Step).getUnsignedMax());
           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
-              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
+              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                            AR->getPostIncExpr(*this), N)))
             // Return the expression with the addrec on the outside.
@@ -895,8 +973,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
         } else if (isKnownNegative(Step)) {
           const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                       getSignedRange(Step).getSignedMin());
-          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
-              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
+          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
+              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                            AR->getPostIncExpr(*this), N)))
             // Return the expression with the addrec on the outside.
@@ -910,8 +988,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
   // The cast wasn't folded; create an explicit cast node.
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
-  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
+  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
+                                                   Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -925,12 +1003,10 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = getEffectiveSCEVType(Ty);
-    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
-    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
-    return getConstant(cast<ConstantInt>(C));
-  }
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+    return getConstant(
+      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
+                                              getEffectiveSCEVType(Ty))));
 
   // sext(sext(x)) --> sext(x)
   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
@@ -985,9 +1061,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
       if (MaxBECount == RecastedMaxBECount) {
         const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
         // Check whether Start+Step*MaxBECount has no signed overflow.
-        const SCEV *SMul =
-          getMulExpr(CastedMaxBECount,
-                     getTruncateOrSignExtend(Step, Start->getType()));
+        const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
         const SCEV *Add = getAddExpr(Start, SMul);
         const SCEV *OperandExtendedAdd =
           getAddExpr(getSignExtendExpr(Start, WideTy),
@@ -1001,9 +1075,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
 
         // Similar to above, only this time treat the step value as unsigned.
         // This covers loops that count up with an unsigned step.
-        const SCEV *UMul =
-          getMulExpr(CastedMaxBECount,
-                     getTruncateOrZeroExtend(Step, Start->getType()));
+        const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
         Add = getAddExpr(Start, UMul);
         OperandExtendedAdd =
           getAddExpr(getSignExtendExpr(Start, WideTy),
@@ -1024,7 +1096,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
           const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                       getSignedRange(Step).getSignedMax());
           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
-              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
+              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                            AR->getPostIncExpr(*this), N)))
             // Return the expression with the addrec on the outside.
@@ -1035,7 +1107,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
           const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                       getSignedRange(Step).getSignedMin());
           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
-              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
+              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                            AR->getPostIncExpr(*this), N)))
             // Return the expression with the addrec on the outside.
@@ -1049,8 +1121,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
   // The cast wasn't folded; create an explicit cast node.
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
-  new (S) SCEVSignExtendExpr(ID, Op, Ty);
+  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
+                                                   Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -1089,6 +1161,22 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
   if (!isa<SCEVSignExtendExpr>(SExt))
     return SExt;
 
+  // Force the cast to be folded into the operands of an addrec.
+  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
+    SmallVector<const SCEV *, 4> Ops;
+    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+         I != E; ++I)
+      Ops.push_back(getAnyExtendExpr(*I, Ty));
+    return getAddRecExpr(Ops, AR->getLoop());
+  }
+
+  // As a special case, fold anyext(undef) to undef.  We don't want to
+  // know too much about SCEVUnknowns, but this special case is handy
+  // and harmless.
+  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+    if (isa<UndefValue>(U->getValue()))
+      return getSCEV(UndefValue::get(Ty));
+
   // If the expression is obviously signed, use the sext cast value.
   if (isa<SCEVSMaxExpr>(Op))
     return SExt;
@@ -1126,23 +1214,34 @@ static bool
 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                              SmallVector<const SCEV *, 8> &NewOps,
                              APInt &AccumulatedConstant,
-                             const SmallVectorImpl<const SCEV *> &Ops,
+                             const SCEV *const *Ops, size_t NumOperands,
                              const APInt &Scale,
                              ScalarEvolution &SE) {
   bool Interesting = false;
 
-  // Iterate over the add operands.
-  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+  // Iterate over the add operands. They are sorted, with constants first.
+  unsigned i = 0;
+  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
+    ++i;
+    // Pull a buried constant out to the outside.
+    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
+      Interesting = true;
+    AccumulatedConstant += Scale * C->getValue()->getValue();
+  }
+
+  // Next comes everything else. We're especially interested in multiplies
+  // here, but they're in the middle, so just visit the rest with one loop.
+  for (; i != NumOperands; ++i) {
     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
       APInt NewScale =
         Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
         // A multiplication of a constant with another add; recurse.
+        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
         Interesting |=
           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
-                                       cast<SCEVAddExpr>(Mul->getOperand(1))
-                                         ->getOperands(),
+                                       Add->op_begin(), Add->getNumOperands(),
                                        NewScale, SE);
       } else {
         // A multiplication of a constant with some other value. Update
@@ -1160,11 +1259,6 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
           Interesting = true;
         }
       }
-    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
-      // Pull a buried constant out to the outside.
-      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
-        Interesting = true;
-      AccumulatedConstant += Scale * C->getValue()->getValue();
     } else {
       // An ordinary operand. Update the map.
       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
@@ -1198,12 +1292,23 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   assert(!Ops.empty() && "Cannot get empty add!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
+  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
-           getEffectiveSCEVType(Ops[0]->getType()) &&
+    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
 #endif
 
+  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
+  if (!HasNUW && HasNSW) {
+    bool All = true;
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+      if (!isKnownNonNegative(Ops[i])) {
+        All = false;
+        break;
+      }
+    if (All) HasNUW = true;
+  }
+
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
 
@@ -1222,13 +1327,13 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     }
 
     // If we are left with a constant zero being added, strip it off.
-    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
+    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
-  }
 
-  if (Ops.size() == 1) return Ops[0];
+    if (Ops.size() == 1) return Ops[0];
+  }
 
   // Okay, check to see if the same value occurs in the operand list twice.  If
   // so, merge them together into an multiply expression.  Since we sorted the
@@ -1238,7 +1343,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
       // Found a match, merge the two values into a multiply, and add any
       // remaining values to the result.
-      const SCEV *Two = getIntegerSCEV(2, Ty);
+      const SCEV *Two = getConstant(Ty, 2);
       const SCEV *Mul = getMulExpr(Ops[i], Two);
       if (Ops.size() == 2) return Mul;
@@ -1267,9 +1372,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
         }
         LargeOps.push_back(T->getOperand());
       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
-        // This could be either sign or zero extension, but sign extension
-        // is much more likely to be foldable here.
-        LargeOps.push_back(getSignExtendExpr(C, SrcType));
+        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
         SmallVector<const SCEV *, 8> LargeMulOps;
         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
@@ -1282,9 +1385,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
             LargeMulOps.push_back(T->getOperand());
           } else if (const SCEVConstant *C =
                        dyn_cast<SCEVConstant>(M->getOperand(j))) {
-            // This could be either sign or zero extension, but sign extension
-            // is much more likely to be foldable here.
-            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
+            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
           } else {
             Ok = false;
             break;
@@ -1316,14 +1417,14 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
     // If we have an add, expand the add operands onto the end of the operands
     // list.
-    Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
     Ops.erase(Ops.begin()+Idx);
+    Ops.append(Add->op_begin(), Add->op_end());
     DeletedAdd = true;
   }
 
   // If we deleted at least one add, we added operands to the end of the list,
   // and they are not necessarily sorted.  Recurse to resort and resimplify
-  // any operands we just aquired.
+  // any operands we just acquired.
   if (DeletedAdd)
     return getAddExpr(Ops);
 
@@ -1340,7 +1441,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     SmallVector<const SCEV *, 8> NewOps;
     APInt AccumulatedConstant(BitWidth, 0);
     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
-                                     Ops, APInt(BitWidth, 1), *this)) {
+                                     Ops.data(), Ops.size(),
+                                     APInt(BitWidth, 1), *this)) {
       // Some interesting folding opportunity is present, so its worthwhile to
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
@@ -1358,7 +1460,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
         Ops.push_back(getMulExpr(getConstant(I->first),
                                  getAddExpr(I->second)));
       if (Ops.empty())
-        return getIntegerSCEV(0, Ty);
+        return getConstant(Ty, 0);
       if (Ops.size() == 1)
         return Ops[0];
       return getAddExpr(Ops);
@@ -1383,7 +1485,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
             MulOps.erase(MulOps.begin()+MulOp);
             InnerMul = getMulExpr(MulOps);
           }
-          const SCEV *One = getIntegerSCEV(1, Ty);
+          const SCEV *One = getConstant(Ty, 1);
           const SCEV *AddOne = getAddExpr(InnerMul, One);
           const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
           if (Ops.size() == 2) return OuterMul;
@@ -1447,8 +1549,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     // they are loop invariant w.r.t. the recurrence.
     SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+    const Loop *AddRecLoop = AddRec->getLoop();
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
         LIOps.push_back(Ops[i]);
         Ops.erase(Ops.begin()+i);
         --i; --e;
@@ -1463,9 +1566,11 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                 AddRec->op_end());
       AddRecOps[0] = getAddExpr(LIOps);
 
-      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
-      // is not associative so this isn't necessarily safe.
-      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+      // Build the new addrec. Propagate the NUW and NSW flags if both the
+      // outer add and the inner addrec are guaranteed to have no overflow.
+      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
+                                         HasNUW && AddRec->hasNoUnsignedWrap(),
+                                         HasNSW && AddRec->hasNoSignedWrap());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1486,19 +1591,19 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
       if (OtherIdx != Idx) {
         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
-        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
+        if (AddRecLoop == OtherAddRec->getLoop()) {
           // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
           SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                               AddRec->op_end());
           for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
             if (i >= NewOps.size()) {
-              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
+              NewOps.append(OtherAddRec->op_begin()+i,
                             OtherAddRec->op_end());
               break;
             }
             NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
           }
-          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
+          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
 
           if (Ops.size() == 2) return NewAddRec;
 
@@ -1521,21 +1626,26 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEVAddExpr *S = SCEVAllocator.Allocate<SCEVAddExpr>();
-  new (S) SCEVAddExpr(ID, Ops);
-  UniqueSCEVs.InsertNode(S, IP);
+  SCEVAddExpr *S =
+    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+  if (!S) {
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
+                                        O, Ops.size());
+    UniqueSCEVs.InsertNode(S, IP);
+  }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
   if (HasNSW) S->setHasNoSignedWrap(true);
   return S;
 }
 
-
 /// getMulExpr - Get a canonical multiply expression, or something simpler if
 /// possible.
 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                         bool HasNUW, bool HasNSW) {
   assert(!Ops.empty() && "Cannot get empty mul!");
+  if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
     assert(getEffectiveSCEVType(Ops[i]->getType()) ==
@@ -1543,6 +1653,17 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
           "SCEVMulExpr operand types don't match!");
 #endif
 
+  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
+  if (!HasNUW && HasNSW) {
+    bool All = true;
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+      if (!isKnownNonNegative(Ops[i])) {
+        All = false;
+        break;
+      }
+    if (All) HasNUW = true;
+  }
+
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
 
@@ -1558,7 +1679,6 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
         return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                           getMulExpr(LHSC, Add->getOperand(1)));
 
-    ++Idx;
     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
       // We found two constants, fold them together!
@@ -1578,30 +1698,46 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
       // If we have a multiply of zero, it will always be zero.
       return Ops[0];
+    } else if (Ops[0]->isAllOnesValue()) {
+      // If we have a mul by -1 of an add, try distributing the -1 among the
+      // add operands.
+      if (Ops.size() == 2)
+        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
+          SmallVector<const SCEV *, 4> NewOps;
+          bool AnyFolded = false;
+          for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+               I != E; ++I) {
+            const SCEV *Mul = getMulExpr(Ops[0], *I);
+            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
+            NewOps.push_back(Mul);
+          }
+          if (AnyFolded)
+            return getAddExpr(NewOps);
+        }
     }
+
+    if (Ops.size() == 1)
+      return Ops[0];
   }
 
   // Skip over the add expression until we get to a multiply.
   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
     ++Idx;
 
-  if (Ops.size() == 1)
-    return Ops[0];
-
   // If there are mul operands inline them all into this expression.
   if (Idx < Ops.size()) {
     bool DeletedMul = false;
     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
       // If we have an mul, expand the mul operands onto the end of the operands
       // list.
-      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }
 
     // If we deleted at least one mul, we added operands to the end of the list,
     // and they are not necessarily sorted.  Recurse to resort and resimplify
-    // any operands we just aquired.
+    // any operands we just acquired.
     if (DeletedMul)
       return getMulExpr(Ops);
   }
@@ -1630,21 +1766,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
       // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
       SmallVector<const SCEV *, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
-      if (LIOps.size() == 1) {
-        const SCEV *Scale = LIOps[0];
-        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
-          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
-      } else {
-        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
-          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
-          MulOps.push_back(AddRec->getOperand(i));
-          NewOps.push_back(getMulExpr(MulOps));
-        }
-      }
+      const SCEV *Scale = getMulExpr(LIOps);
+      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
+        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
 
-      // It's tempting to propagate the NSW flag here, but nsw multiplication
-      // is not associative so this isn't necessarily safe.
-      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
+      // Build the new addrec. Propagate the NUW and NSW flags if both the
+      // outer mul and the inner addrec are guaranteed to have no overflow.
+      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+                                         HasNUW && AddRec->hasNoUnsignedWrap(),
+                                         HasNSW && AddRec->hasNoSignedWrap());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1698,10 +1828,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEVMulExpr *S = SCEVAllocator.Allocate<SCEVMulExpr>();
-  new (S) SCEVMulExpr(ID, Ops);
-  UniqueSCEVs.InsertNode(S, IP);
+  SCEVMulExpr *S =
+    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+  if (!S) {
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+                                        O, Ops.size());
+    UniqueSCEVs.InsertNode(S, IP);
+  }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
   if (HasNSW) S->setHasNoSignedWrap(true);
   return S;
@@ -1718,79 +1853,81 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
     if (RHSC->getValue()->equalsInt(1))
       return LHS;                               // X udiv 1 --> x
-    if (RHSC->isZero())
-      return getIntegerSCEV(0, LHS->getType()); // value is undefined
-
-    // Determine if the division can be folded into the operands of
-    // its operands.
-    // TODO: Generalize this to non-constants by using known-bits information.
-    const Type *Ty = LHS->getType();
-    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
-    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
-    // For non-power-of-two values, effectively round the value up to the
-    // nearest power of two.
-    if (!RHSC->getValue()->getValue().isPowerOf2())
-      ++MaxShiftAmt;
-    const IntegerType *ExtTy =
-      IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
-    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
-    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
-      if (const SCEVConstant *Step =
-            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
-        if (!Step->getValue()->getValue()
-              .urem(RHSC->getValue()->getValue()) &&
-            getZeroExtendExpr(AR, ExtTy) ==
-            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
-                          getZeroExtendExpr(Step, ExtTy),
-                          AR->getLoop())) {
-          SmallVector<const SCEV *, 4> Operands;
-          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
-            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
-          return getAddRecExpr(Operands, AR->getLoop());
-        }
-    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
-    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
-      SmallVector<const SCEV *, 4> Operands;
-      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
-        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
-      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
-        // Find an operand that's safely divisible.
-        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
-          const SCEV *Op = M->getOperand(i);
-          const SCEV *Div = getUDivExpr(Op, RHSC);
-          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
-            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
-                                                    MOperands.end());
-            Operands[i] = Div;
-            return getMulExpr(Operands);
+    // If the denominator is zero, the result of the udiv is undefined. Don't
+    // try to analyze it, because the resolution chosen here may differ from
+    // the resolution chosen in other parts of the compiler.
+    if (!RHSC->getValue()->isZero()) {
+      // Determine if the division can be folded into the operands of
+      // its operands.
+      // TODO: Generalize this to non-constants by using known-bits information.
+      const Type *Ty = LHS->getType();
+      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
+      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
+      // For non-power-of-two values, effectively round the value up to the
+      // nearest power of two.
+      if (!RHSC->getValue()->getValue().isPowerOf2())
+        ++MaxShiftAmt;
+      const IntegerType *ExtTy =
+        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
+      // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
+      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+        if (const SCEVConstant *Step =
+              dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
+          if (!Step->getValue()->getValue()
+                .urem(RHSC->getValue()->getValue()) &&
+              getZeroExtendExpr(AR, ExtTy) ==
+              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
+                            getZeroExtendExpr(Step, ExtTy),
+                            AR->getLoop())) {
+            SmallVector<const SCEV *, 4> Operands;
+            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
+              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
+            return getAddRecExpr(Operands, AR->getLoop());
          }
+      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
+      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
+        SmallVector<const SCEV *, 4> Operands;
+        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
+          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
+        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
+          // Find an operand that's safely divisible.
+          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
+            const SCEV *Op = M->getOperand(i);
+            const SCEV *Div = getUDivExpr(Op, RHSC);
+            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
+              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
+                                                      M->op_end());
+              Operands[i] = Div;
+              return getMulExpr(Operands);
+            }
          }
        }
-    }
-    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
-    if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
-      SmallVector<const SCEV *, 4> Operands;
-      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
-        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
-      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
-        Operands.clear();
-        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
-          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
-          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
-            break;
-          Operands.push_back(Op);
-        }
-        if (Operands.size() == A->getNumOperands())
-          return getAddExpr(Operands);
-      }
-    }
-
-    // Fold if both operands are constant.
-    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
-      Constant *LHSCV = LHSC->getValue();
-      Constant *RHSCV = RHSC->getValue();
-      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
-                                                                 RHSCV)));
+      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
+      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
+        SmallVector<const SCEV *, 4> Operands;
+        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
+          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
+        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
+          Operands.clear();
+          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
+            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
+            if (isa<SCEVUDivExpr>(Op) ||
+                getMulExpr(Op, RHS) != A->getOperand(i))
+              break;
+            Operands.push_back(Op);
+          }
+          if (Operands.size() == A->getNumOperands())
+            return getAddExpr(Operands);
        }
      }
 
+      // Fold if both operands are constant.
+      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+        Constant *LHSCV = LHSC->getValue();
+        Constant *RHSCV = RHSC->getValue();
+        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
+                                                                   RHSCV)));
+      }
     }
   }
 
@@ -1800,8 +1937,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   ID.AddPointer(RHS);
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
-  new (S) SCEVUDivExpr(ID, LHS, RHS);
+  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
+                                             LHS, RHS);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -1816,8 +1953,7 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
   Operands.push_back(Start);
   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
     if (StepChrec->getLoop() == L) {
-      Operands.insert(Operands.end(), StepChrec->op_begin(),
-                      StepChrec->op_end());
+      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
       return getAddRecExpr(Operands, L);
     }
 
@@ -1844,10 +1980,30 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
     return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
   }
 
+  // It's tempting to want to call getMaxBackedgeTakenCount count here and
+  // use that information to infer NUW and NSW flags. However, computing a
+  // BE count requires calling getAddRecExpr, so we may not yet have a
+  // meaningful BE count at this point (and if we don't, we'd be stuck
+  // with a SCEVCouldNotCompute as the cached BE count).
+
+  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
+  if (!HasNUW && HasNSW) {
+    bool All = true;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+      if (!isKnownNonNegative(Operands[i])) {
+        All = false;
+        break;
+      }
+    if (All) HasNUW = true;
+  }
+
   // Canonicalize nested AddRecs in by nesting them in order of loop depth.
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
     const Loop *NestedLoop = NestedAR->getLoop();
-    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
+    if (L->contains(NestedLoop->getHeader()) ?
+        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
+        (!NestedLoop->contains(L->getHeader()) &&
+         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                   NestedAR->op_end());
       Operands[0] = NestedAR->getStart();
@@ -1877,6 +2033,8 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
     }
   }
 
+  // Okay, it looks like we really DO need an addrec expr. Check to see if we
+  // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scAddRecExpr);
   ID.AddInteger(Operands.size());
@@ -1884,10 +2042,15 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
     ID.AddPointer(Operands[i]);
   ID.AddPointer(L);
   void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEVAddRecExpr *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
-  new (S) SCEVAddRecExpr(ID, Operands, L);
-  UniqueSCEVs.InsertNode(S, IP);
+  SCEVAddRecExpr *S =
+    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+  if (!S) {
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
+    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
+    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
+                                           O, Operands.size(), L);
+    UniqueSCEVs.InsertNode(S, IP);
+  }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
   if (HasNSW) S->setHasNoSignedWrap(true);
   return S;
@@ -1940,9 +2103,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
       // maximum-int.
       return Ops[0];
     }
-  }
 
-  if (Ops.size() == 1) return Ops[0];
+    if (Ops.size() == 1) return Ops[0];
+  }
 
   // Find the first SMax
   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
     ++Idx;
 
@@ -1953,8 +2116,8 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   if (Idx < Ops.size()) {
     bool DeletedSMax = false;
     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
-      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(SMax->op_begin(), SMax->op_end());
       DeletedSMax = true;
     }
 
@@ -1966,7 +2129,13 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   // so, delete one.  Since we sorted the list, these values are required to
   // be adjacent.
   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
-    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
+    //  X smax Y smax Y  -->  X smax Y
+    //  X smax Y         -->  X, if X is always greater than Y
+    if (Ops[i] == Ops[i+1] ||
+        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
+      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+      --i; --e;
+    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
       --i; --e;
     }
@@ -1984,8 +2153,10 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
     ID.AddPointer(Ops[i]);
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
-  new (S) SCEVSMaxExpr(ID, Ops);
+  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
+                                             O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2037,9 +2208,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
       // maximum-int.
       return Ops[0];
     }
-  }
 
-  if (Ops.size() == 1) return Ops[0];
+    if (Ops.size() == 1) return Ops[0];
+  }
 
   // Find the first UMax
   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
     ++Idx;
 
@@ -2050,8 +2221,8 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   if (Idx < Ops.size()) {
     bool DeletedUMax = false;
     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
-      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(UMax->op_begin(), UMax->op_end());
       DeletedUMax = true;
     }
 
@@ -2063,7 +2234,13 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   // so, delete one.  Since we sorted the list, these values are required to
   // be adjacent.
   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
-    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
+    //  X umax Y umax Y  -->  X umax Y
+    //  X umax Y         -->  X, if X is always greater than Y
+    if (Ops[i] == Ops[i+1] ||
+        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
+      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+      --i; --e;
+    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
       --i; --e;
     }
@@ -2081,8 +2258,10 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
     ID.AddPointer(Ops[i]);
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
-  new (S) SCEVUMaxExpr(ID, Ops);
+  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
+                                             O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2099,74 +2278,56 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
 }
 
-const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
-                                                unsigned FieldNo) {
-  // If we have TargetData we can determine the constant offset.
-  if (TD) {
-    const Type *IntPtrTy = TD->getIntPtrType(getContext());
-    const StructLayout &SL = *TD->getStructLayout(STy);
-    uint64_t Offset = SL.getElementOffset(FieldNo);
-    return getIntegerSCEV(Offset, IntPtrTy);
-  }
-
-  // Field 0 is always at offset 0.
-  if (FieldNo == 0) {
-    const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
-    return getIntegerSCEV(0, Ty);
-  }
+const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+  // If we have TargetData, we can bypass creating a target-independent
+  // constant expression and then folding it back into a ConstantInt.
+  // This is just a compile-time optimization.
+  if (TD)
+    return getConstant(TD->getIntPtrType(getContext()),
+                       TD->getTypeAllocSize(AllocTy));
 
-  // Okay, it looks like we really DO need an offsetof expr.  Check to see if we
-  // already have one, otherwise create a new one.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scFieldOffset);
-  ID.AddPointer(STy);
-  ID.AddInteger(FieldNo);
-  void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVFieldOffsetExpr>();
-  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
-  new (S) SCEVFieldOffsetExpr(ID, Ty, STy, FieldNo);
-  UniqueSCEVs.InsertNode(S, IP);
-  return S;
+  Constant *C = ConstantExpr::getSizeOf(AllocTy);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
-const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
-  // If we have TargetData we can determine the constant size.
-  if (TD && AllocTy->isSized()) {
-    const Type *IntPtrTy = TD->getIntPtrType(getContext());
-    return getIntegerSCEV(TD->getTypeAllocSize(AllocTy), IntPtrTy);
-  }
+const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+  Constant *C = ConstantExpr::getAlignOf(AllocTy);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
 
-  // Expand an array size into the element size times the number
-  // of elements.
-  if (const ArrayType *ATy = dyn_cast<ArrayType>(AllocTy)) {
-    const SCEV *E = getAllocSizeExpr(ATy->getElementType());
-    return getMulExpr(
-      E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
-                                      ATy->getNumElements())));
-  }
+const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+                                             unsigned FieldNo) {
+  // If we have TargetData, we can bypass creating a target-independent
+  // constant expression and then folding it back into a ConstantInt.
+  // This is just a compile-time optimization.
+  if (TD)
+    return getConstant(TD->getIntPtrType(getContext()),
+                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
 
-  // Expand a vector size into the element size times the number
-  // of elements.
-  if (const VectorType *VTy = dyn_cast<VectorType>(AllocTy)) {
-    const SCEV *E = getAllocSizeExpr(VTy->getElementType());
-    return getMulExpr(
-      E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
-                                      VTy->getNumElements())));
-  }
+  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
 
-  // Okay, it looks like we really DO need a sizeof expr.  Check to see if we
-  // already have one, otherwise create a new one.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scAllocSize);
-  ID.AddPointer(AllocTy);
-  void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVAllocSizeExpr>();
-  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
-  new (S) SCEVAllocSizeExpr(ID, Ty, AllocTy);
-  UniqueSCEVs.InsertNode(S, IP);
-  return S;
+const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+                                             Constant *FieldNo) {
+  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
 const SCEV *ScalarEvolution::getUnknown(Value *V) {
@@ -2180,8 +2341,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
   ID.AddPointer(V);
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
-  new (S) SCEVUnknown(ID, V);
+  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2196,7 +2356,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
 /// has access to target-specific information.
 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
   // Integers and pointers are always SCEVable.
-  return Ty->isInteger() || isa<PointerType>(Ty);
+  return Ty->isIntegerTy() || Ty->isPointerTy();
 }
 
 /// getTypeSizeInBits - Return the size in bits of the specified type,
@@ -2209,12 +2369,12 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
     return TD->getTypeSizeInBits(Ty);
 
   // Integer types have fixed sizes.
-  if (Ty->isInteger())
+  if (Ty->isIntegerTy())
     return Ty->getPrimitiveSizeInBits();
 
   // The only other support type is pointer. Without TargetData, conservatively
   // assume pointers are 64-bit.
-  assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!");
+  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
   return 64;
 }
 
@@ -2225,11 +2385,11 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
-  if (Ty->isInteger())
+  if (Ty->isIntegerTy())
     return Ty;
 
   // The only other support type is pointer.
-  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
+  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
   if (TD) return TD->getIntPtrType(getContext());
 
   // Without TargetData, conservatively assume pointers are 64-bit.
@@ -2252,13 +2412,6 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
   return S;
 }
 
-/// getIntegerSCEV - Given a SCEVable type, create a constant for the
-/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
-  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
-  return getConstant(ConstantInt::get(ITy, Val));
-}
-
 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
 ///
 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
@@ -2289,6 +2442,10 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
 ///
 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
                                           const SCEV *RHS) {
+  // Fast path: X - X --> 0.
+  if (LHS == RHS)
+    return getConstant(LHS->getType(), 0);
+
   // X - Y --> X + -Y
   return getAddExpr(LHS, getNegativeSCEV(RHS));
 }
@@ -2300,8 +2457,8 @@ const SCEV *
 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
                                          const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
     return V;  // No conversion
@@ -2317,8 +2474,8 @@ const SCEV *
 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                          const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
        "Cannot truncate or zero extend with non-integer arguments!");
   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
     return V;  // No conversion
@@ -2333,8 +2490,8 @@ const SCEV *
 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
@@ -2349,8 +2506,8 @@ const SCEV *
 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
        "Cannot noop or sign extend with non-integer arguments!");
   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
        "getNoopOrSignExtend cannot truncate!");
@@ -2366,8 +2523,8 @@ const SCEV *
 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
        "Cannot noop or any extend with non-integer arguments!");
   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
        "getNoopOrAnyExtend cannot truncate!");
@@ -2381,8 +2538,8 @@ const SCEV *
 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
        "Cannot truncate or noop with non-integer arguments!");
   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
        "getTruncateOrNoop cannot extend!");
@@ -2439,12 +2596,12 @@ PushDefUseChildren(Instruction *I,
 /// the Scalars map if they reference SymName. This is used during PHI
 /// resolution.
void -ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) { +ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { SmallVector Worklist; - PushDefUseChildren(I, Worklist); + PushDefUseChildren(PN, Worklist); SmallPtrSet Visited; - Visited.insert(I); + Visited.insert(PN); while (!Worklist.empty()) { Instruction *I = Worklist.pop_back_val(); if (!Visited.insert(I)) continue; @@ -2454,16 +2611,19 @@ ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) { if (It != Scalars.end()) { // Short-circuit the def-use traversal if the symbolic name // ceases to appear in expressions. - if (!It->second->hasOperand(SymName)) + if (It->second != SymName && !It->second->hasOperand(SymName)) continue; // SCEVUnknown for a PHI either means that it has an unrecognized - // structure, or it's a PHI that's in the progress of being computed - // by createNodeForPHI. In the former case, additional loop trip - // count information isn't going to change anything. In the later - // case, createNodeForPHI will perform the necessary updates on its - // own when it gets to that point. - if (!isa(I) || !isa(It->second)) { + // structure, it's a PHI that's in the progress of being computed + // by createNodeForPHI, or it's a single-value PHI. In the first case, + // additional loop trip count information isn't going to change anything. + // In the second case, createNodeForPHI will perform the necessary + // updates on its own when it gets to that point. In the third, we do + // want to forget the SCEVUnknown. + if (!isa(I) || + !isa(It->second) || + (I != PN && It->second == SymName)) { ValuesAtScopes.erase(It->second); Scalars.erase(It); } @@ -2477,14 +2637,29 @@ ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) { /// a loop header, making it a potential recurrence, or it doesn't. /// const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { - if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. - if (const Loop *L = LI->getLoopFor(PN->getParent())) - if (L->getHeader() == PN->getParent()) { - // If it lives in the loop header, it has two incoming values, one - // from outside the loop, and one from inside. - unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); - unsigned BackEdge = IncomingEdge^1; - + if (const Loop *L = LI->getLoopFor(PN->getParent())) + if (L->getHeader() == PN->getParent()) { + // The loop may have multiple entrances or multiple exits; we can analyze + // this phi as an addrec if it has a unique entry value and a unique + // backedge value. + Value *BEValueV = 0, *StartValueV = 0; + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + Value *V = PN->getIncomingValue(i); + if (L->contains(PN->getIncomingBlock(i))) { + if (!BEValueV) { + BEValueV = V; + } else if (BEValueV != V) { + BEValueV = 0; + break; + } + } else if (!StartValueV) { + StartValueV = V; + } else if (StartValueV != V) { + StartValueV = 0; + break; + } + } + if (BEValueV && StartValueV) { // While we are analyzing this PHI node, handle its value symbolically. const SCEV *SymbolicName = getUnknown(PN); assert(Scalars.find(PN) == Scalars.end() && @@ -2493,7 +2668,6 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. 
- Value *BEValueV = PN->getIncomingValue(BackEdge); const SCEV *BEValue = getSCEV(BEValueV); // NOTE: If BEValue is loop invariant, we know that the PHI node just @@ -2525,31 +2699,27 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { if (Accum->isLoopInvariant(L) || (isa(Accum) && cast(Accum)->getLoop() == L)) { - const SCEV *StartVal = - getSCEV(PN->getIncomingValue(IncomingEdge)); - const SCEVAddRecExpr *PHISCEV = - cast(getAddRecExpr(StartVal, Accum, L)); - - // If the increment doesn't overflow, then neither the addrec nor the - // post-increment will overflow. - if (const AddOperator *OBO = dyn_cast(BEValueV)) - if (OBO->getOperand(0) == PN && - getSCEV(OBO->getOperand(1)) == - PHISCEV->getStepRecurrence(*this)) { - const SCEVAddRecExpr *PostInc = PHISCEV->getPostIncExpr(*this); - if (OBO->hasNoUnsignedWrap()) { - const_cast(PHISCEV) - ->setHasNoUnsignedWrap(true); - const_cast(PostInc) - ->setHasNoUnsignedWrap(true); - } - if (OBO->hasNoSignedWrap()) { - const_cast(PHISCEV) - ->setHasNoSignedWrap(true); - const_cast(PostInc) - ->setHasNoSignedWrap(true); - } - } + bool HasNUW = false; + bool HasNSW = false; + + // If the increment doesn't overflow, then neither the addrec nor + // the post-increment will overflow. + if (const AddOperator *OBO = dyn_cast(BEValueV)) { + if (OBO->hasNoUnsignedWrap()) + HasNUW = true; + if (OBO->hasNoSignedWrap()) + HasNSW = true; + } + + const SCEV *StartVal = getSCEV(StartValueV); + const SCEV *PHISCEV = + getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW); + + // Since the no-wrap flags are on the increment, they apply to the + // post-incremented value as well. + if (Accum->isLoopInvariant(L)) + (void)getAddRecExpr(getAddExpr(StartVal, Accum), + Accum, L, HasNUW, HasNSW); // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the @@ -2567,12 +2737,12 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { // Because the other in-value of i (0) fits the evolution of BEValue // i really is an addrec evolution. if (AddRec->getLoop() == L && AddRec->isAffine()) { - const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); + const SCEV *StartVal = getSCEV(StartValueV); // If StartVal = j.start - j.stride, we can use StartVal as the // initial step of the addrec evolution. if (StartVal == getMinusSCEV(AddRec->getOperand(0), - AddRec->getOperand(1))) { + AddRec->getOperand(1))) { const SCEV *PHISCEV = getAddRecExpr(StartVal, AddRec->getOperand(1), L); @@ -2585,13 +2755,24 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { } } } - - return SymbolicName; } + } - // It's tempting to recognize PHIs with a unique incoming value, however - // this leads passes like indvars to break LCSSA form. Fortunately, such - // PHIs are rare, as instcombine zaps them. + // If the PHI has a single incoming value, follow that value, unless the + // PHI's incoming blocks are in a different loop, in which case doing so + // risks breaking LCSSA form. Instcombine would normally zap these, but + // it doesn't have DominatorTree information, so it may miss cases. + if (Value *V = PN->hasConstantValue(DT)) { + bool AllSameLoop = true; + Loop *PNLoop = LI->getLoopFor(PN->getParent()); + for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i) + if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) { + AllSameLoop = false; + break; + } + if (AllSameLoop) + return getSCEV(V); + } // If it's not a loop phi, we can't handle it yet. 
return getUnknown(PN); @@ -2602,13 +2783,17 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { /// const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { - bool InBounds = GEP->isInBounds(); + // Don't blindly transfer the inbounds flag from the GEP instruction to the + // Add expression, because the Instruction may be guarded by control flow + // and the no-overflow bits may not be valid for the expression in any + // context. + const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); Value *Base = GEP->getOperand(0); // Don't attempt to analyze GEPs over unsized objects. if (!cast(Base->getType())->getElementType()->isSized()) return getUnknown(GEP); - const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy); + const SCEV *TotalOffset = getConstant(IntPtrTy, 0); gep_type_iterator GTI = gep_type_begin(GEP); for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()), E = GEP->op_end(); @@ -2618,24 +2803,30 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { if (const StructType *STy = dyn_cast(*GTI++)) { // For a struct, add the member offset. unsigned FieldNo = cast(Index)->getZExtValue(); - TotalOffset = getAddExpr(TotalOffset, - getFieldOffsetExpr(STy, FieldNo), - /*HasNUW=*/false, /*HasNSW=*/InBounds); + const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo); + + // Add the field offset to the running total offset. + TotalOffset = getAddExpr(TotalOffset, FieldOffset); } else { // For an array, add the element offset, explicitly scaled. - const SCEV *LocalOffset = getSCEV(Index); - if (!isa(LocalOffset->getType())) - // Getelementptr indicies are signed. - LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy); - // Lower "inbounds" GEPs to NSW arithmetic. - LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI), - /*HasNUW=*/false, /*HasNSW=*/InBounds); - TotalOffset = getAddExpr(TotalOffset, LocalOffset, - /*HasNUW=*/false, /*HasNSW=*/InBounds); + const SCEV *ElementSize = getSizeOfExpr(*GTI); + const SCEV *IndexS = getSCEV(Index); + // Getelementptr indices are signed. + IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy); + + // Multiply the index by the element size to compute the element offset. + const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize); + + // Add the element offset to the running total offset. + TotalOffset = getAddExpr(TotalOffset, LocalOffset); } } - return getAddExpr(getSCEV(Base), TotalOffset, - /*HasNUW=*/false, /*HasNSW=*/InBounds); + + // Get the SCEV for the GEP base. + const SCEV *BaseS = getSCEV(Base); + + // Add the total offset from all the GEP indices to the base. + return getAddExpr(BaseS, TotalOffset); } /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is @@ -2727,106 +2918,129 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) { if (const SCEVConstant *C = dyn_cast(S)) return ConstantRange(C->getValue()->getValue()); + unsigned BitWidth = getTypeSizeInBits(S->getType()); + ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); + + // If the value has known zeros, the maximum unsigned value will have those + // known zeros as well. 
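To make the clamp computed next concrete: if S is known to be a multiple of 4 in an 8-bit type (TZ = 2), the largest value preserving those trailing zeros is 0xFC, so the conservative range is the half-open interval [0x00, 0xFD). A worked check with plain integers standing in for APInt (illustrative constants only):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned TZ = 2;  // what GetMinTrailingZeros(S) would report
  uint8_t MaxVal = 0xFF;  // analogous to APInt::getMaxValue(8)
  uint8_t UpperInclusive = (uint8_t)((MaxVal >> TZ) << TZ);  // clear low TZ bits
  assert(UpperInclusive == 0xFC);  // largest 8-bit multiple of 4
  // Mirrors ConstantRange(getMinValue(BW), getMaxValue(BW).lshr(TZ).shl(TZ) + 1).
  return 0;
}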
+ uint32_t TZ = GetMinTrailingZeros(S); + if (TZ != 0) + ConservativeResult = + ConstantRange(APInt::getMinValue(BitWidth), + APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); + if (const SCEVAddExpr *Add = dyn_cast(S)) { ConstantRange X = getUnsignedRange(Add->getOperand(0)); for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) X = X.add(getUnsignedRange(Add->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVMulExpr *Mul = dyn_cast(S)) { ConstantRange X = getUnsignedRange(Mul->getOperand(0)); for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) X = X.multiply(getUnsignedRange(Mul->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVSMaxExpr *SMax = dyn_cast(S)) { ConstantRange X = getUnsignedRange(SMax->getOperand(0)); for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) X = X.smax(getUnsignedRange(SMax->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVUMaxExpr *UMax = dyn_cast(S)) { ConstantRange X = getUnsignedRange(UMax->getOperand(0)); for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) X = X.umax(getUnsignedRange(UMax->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVUDivExpr *UDiv = dyn_cast(S)) { ConstantRange X = getUnsignedRange(UDiv->getLHS()); ConstantRange Y = getUnsignedRange(UDiv->getRHS()); - return X.udiv(Y); + return ConservativeResult.intersectWith(X.udiv(Y)); } if (const SCEVZeroExtendExpr *ZExt = dyn_cast(S)) { ConstantRange X = getUnsignedRange(ZExt->getOperand()); - return X.zeroExtend(cast(ZExt->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); } if (const SCEVSignExtendExpr *SExt = dyn_cast(S)) { ConstantRange X = getUnsignedRange(SExt->getOperand()); - return X.signExtend(cast(SExt->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.signExtend(BitWidth)); } if (const SCEVTruncateExpr *Trunc = dyn_cast(S)) { ConstantRange X = getUnsignedRange(Trunc->getOperand()); - return X.truncate(cast(Trunc->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.truncate(BitWidth)); } - ConstantRange FullSet(getTypeSizeInBits(S->getType()), true); - if (const SCEVAddRecExpr *AddRec = dyn_cast(S)) { - const SCEV *T = getBackedgeTakenCount(AddRec->getLoop()); - const SCEVConstant *Trip = dyn_cast(T); - if (!Trip) return FullSet; + // If there's no unsigned wrap, the value will never be less than its + // initial value. + if (AddRec->hasNoUnsignedWrap()) + if (const SCEVConstant *C = dyn_cast(AddRec->getStart())) + if (!C->getValue()->isZero()) + ConservativeResult = + ConservativeResult.intersectWith( + ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); // TODO: non-affine addrec if (AddRec->isAffine()) { const Type *Ty = AddRec->getType(); const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); - if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) { + if (!isa(MaxBECount) && + getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); const SCEV *Start = AddRec->getStart(); const SCEV *Step = AddRec->getStepRecurrence(*this); - const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); - - // Check for overflow. - // TODO: This is very conservative. 
- if (!(Step->isOne() && - isKnownPredicate(ICmpInst::ICMP_ULT, Start, End)) && - !(Step->isAllOnesValue() && - isKnownPredicate(ICmpInst::ICMP_UGT, Start, End))) - return FullSet; ConstantRange StartRange = getUnsignedRange(Start); - ConstantRange EndRange = getUnsignedRange(End); + ConstantRange StepRange = getSignedRange(Step); + ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); + ConstantRange EndRange = + StartRange.add(MaxBECountRange.multiply(StepRange)); + + // Check for overflow. This must be done with ConstantRange arithmetic + // because we could be called from within the ScalarEvolution overflow + // checking code. + ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1); + ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); + ConstantRange ExtMaxBECountRange = + MaxBECountRange.zextOrTrunc(BitWidth*2+1); + ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1); + if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != + ExtEndRange) + return ConservativeResult; + APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), EndRange.getUnsignedMin()); APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), EndRange.getUnsignedMax()); if (Min.isMinValue() && Max.isMaxValue()) - return FullSet; - return ConstantRange(Min, Max+1); + return ConservativeResult; + return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); } } + + return ConservativeResult; } if (const SCEVUnknown *U = dyn_cast(S)) { // For a SCEVUnknown, ask ValueTracking. - unsigned BitWidth = getTypeSizeInBits(U->getType()); APInt Mask = APInt::getAllOnesValue(BitWidth); APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); if (Ones == ~Zeros + 1) - return FullSet; - return ConstantRange(Ones, ~Zeros + 1); + return ConservativeResult; + return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); } - return FullSet; + return ConservativeResult; } /// getSignedRange - Determine the signed range for a particular SCEV. @@ -2837,106 +3051,141 @@ ScalarEvolution::getSignedRange(const SCEV *S) { if (const SCEVConstant *C = dyn_cast(S)) return ConstantRange(C->getValue()->getValue()); + unsigned BitWidth = getTypeSizeInBits(S->getType()); + ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); + + // If the value has known zeros, the maximum signed value will have those + // known zeros as well. 
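The zextOrTrunc/sextOrTrunc sequence above implements a simple test: recompute End = Start + MaxBECount * Step at a width (2*BitWidth+1 bits) where the arithmetic cannot wrap, and if the wide and narrow results disagree, the narrow computation overflowed and only the conservative result is safe. A toy model of the same test, with 8 bits standing in for the narrow width and 32 bits for the wide one (assumed values):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t Start = 200, Step = 10, MaxBECount = 10;
  uint8_t NarrowEnd = (uint8_t)(Start + MaxBECount * Step);          // 300 wraps to 44
  uint32_t WideEnd = (uint32_t)Start + (uint32_t)MaxBECount * Step;  // 300, exact
  assert(NarrowEnd != WideEnd);  // mismatch -> overflow -> keep ConservativeResult
  return 0;
}

The signed-range computation that follows opens with the analogous trailing-zeros clamp before running its own analysis.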
+ uint32_t TZ = GetMinTrailingZeros(S); + if (TZ != 0) + ConservativeResult = + ConstantRange(APInt::getSignedMinValue(BitWidth), + APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); + if (const SCEVAddExpr *Add = dyn_cast(S)) { ConstantRange X = getSignedRange(Add->getOperand(0)); for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) X = X.add(getSignedRange(Add->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVMulExpr *Mul = dyn_cast(S)) { ConstantRange X = getSignedRange(Mul->getOperand(0)); for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) X = X.multiply(getSignedRange(Mul->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVSMaxExpr *SMax = dyn_cast(S)) { ConstantRange X = getSignedRange(SMax->getOperand(0)); for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) X = X.smax(getSignedRange(SMax->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVUMaxExpr *UMax = dyn_cast(S)) { ConstantRange X = getSignedRange(UMax->getOperand(0)); for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) X = X.umax(getSignedRange(UMax->getOperand(i))); - return X; + return ConservativeResult.intersectWith(X); } if (const SCEVUDivExpr *UDiv = dyn_cast(S)) { ConstantRange X = getSignedRange(UDiv->getLHS()); ConstantRange Y = getSignedRange(UDiv->getRHS()); - return X.udiv(Y); + return ConservativeResult.intersectWith(X.udiv(Y)); } if (const SCEVZeroExtendExpr *ZExt = dyn_cast(S)) { ConstantRange X = getSignedRange(ZExt->getOperand()); - return X.zeroExtend(cast(ZExt->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); } if (const SCEVSignExtendExpr *SExt = dyn_cast(S)) { ConstantRange X = getSignedRange(SExt->getOperand()); - return X.signExtend(cast(SExt->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.signExtend(BitWidth)); } if (const SCEVTruncateExpr *Trunc = dyn_cast(S)) { ConstantRange X = getSignedRange(Trunc->getOperand()); - return X.truncate(cast(Trunc->getType())->getBitWidth()); + return ConservativeResult.intersectWith(X.truncate(BitWidth)); } - ConstantRange FullSet(getTypeSizeInBits(S->getType()), true); - if (const SCEVAddRecExpr *AddRec = dyn_cast(S)) { - const SCEV *T = getBackedgeTakenCount(AddRec->getLoop()); - const SCEVConstant *Trip = dyn_cast(T); - if (!Trip) return FullSet; + // If there's no signed wrap, and all the operands have the same sign or + // zero, the value won't ever change sign. 
+ if (AddRec->hasNoSignedWrap()) { + bool AllNonNeg = true; + bool AllNonPos = true; + for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { + if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; + if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; + } + if (AllNonNeg) + ConservativeResult = ConservativeResult.intersectWith( + ConstantRange(APInt(BitWidth, 0), + APInt::getSignedMinValue(BitWidth))); + else if (AllNonPos) + ConservativeResult = ConservativeResult.intersectWith( + ConstantRange(APInt::getSignedMinValue(BitWidth), + APInt(BitWidth, 1))); + } // TODO: non-affine addrec if (AddRec->isAffine()) { const Type *Ty = AddRec->getType(); const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); - if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) { + if (!isa(MaxBECount) && + getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); const SCEV *Start = AddRec->getStart(); const SCEV *Step = AddRec->getStepRecurrence(*this); - const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); - - // Check for overflow. - // TODO: This is very conservative. - if (!(Step->isOne() && - isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) && - !(Step->isAllOnesValue() && - isKnownPredicate(ICmpInst::ICMP_SGT, Start, End))) - return FullSet; ConstantRange StartRange = getSignedRange(Start); - ConstantRange EndRange = getSignedRange(End); + ConstantRange StepRange = getSignedRange(Step); + ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); + ConstantRange EndRange = + StartRange.add(MaxBECountRange.multiply(StepRange)); + + // Check for overflow. This must be done with ConstantRange arithmetic + // because we could be called from within the ScalarEvolution overflow + // checking code. + ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1); + ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); + ConstantRange ExtMaxBECountRange = + MaxBECountRange.zextOrTrunc(BitWidth*2+1); + ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1); + if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != + ExtEndRange) + return ConservativeResult; + APInt Min = APIntOps::smin(StartRange.getSignedMin(), EndRange.getSignedMin()); APInt Max = APIntOps::smax(StartRange.getSignedMax(), EndRange.getSignedMax()); if (Min.isMinSignedValue() && Max.isMaxSignedValue()) - return FullSet; - return ConstantRange(Min, Max+1); + return ConservativeResult; + return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); } } + + return ConservativeResult; } if (const SCEVUnknown *U = dyn_cast(S)) { // For a SCEVUnknown, ask ValueTracking. - unsigned BitWidth = getTypeSizeInBits(U->getType()); + if (!U->getValue()->getType()->isIntegerTy() && !TD) + return ConservativeResult; unsigned NS = ComputeNumSignBits(U->getValue(), TD); if (NS == 1) - return FullSet; - return + return ConservativeResult; + return ConservativeResult.intersectWith( ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), - APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1); + APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)); } - return FullSet; + return ConservativeResult; } /// createSCEV - We know that there is no SCEV for the specified value. 
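The SCEVUnknown case above converts a ComputeNumSignBits result into a range: NS known sign bits in a BitWidth-bit value confine it to [-2^(BitWidth-NS), 2^(BitWidth-NS) - 1]. A spot check of that identity at BitWidth = 8, NS = 3, using int8_t and assuming arithmetic right shift (which APInt's ashr guarantees):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned NS = 3;  // known sign bits
  int8_t Lo = (int8_t)(INT8_MIN >> (NS - 1));  // getSignedMinValue(8).ashr(2) == -32
  int8_t Hi = (int8_t)(INT8_MAX >> (NS - 1));  // getSignedMaxValue(8).ashr(2) ==  31
  assert(Lo == -32 && Hi == 31);
  // Every 8-bit value whose top three bits agree (000xxxxx or 111xxxxx)
  // lies in [Lo, Hi]; the code forms ConstantRange(Lo, Hi + 1).
  return 0;
}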
@@ -2947,16 +3196,21 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { return getUnknown(V); unsigned Opcode = Instruction::UserOp1; - if (Instruction *I = dyn_cast(V)) + if (Instruction *I = dyn_cast(V)) { Opcode = I->getOpcode(); - else if (ConstantExpr *CE = dyn_cast(V)) + + // Don't attempt to analyze instructions in blocks that aren't + // reachable. Such instructions don't matter, and they aren't required + // to obey basic rules for definitions dominating uses which this + // analysis depends on. + if (!DT->isReachableFromEntry(I->getParent())) + return getUnknown(V); + } else if (ConstantExpr *CE = dyn_cast(V)) Opcode = CE->getOpcode(); else if (ConstantInt *CI = dyn_cast(V)) return getConstant(CI); else if (isa(V)) - return getIntegerSCEV(0, V->getType()); - else if (isa(V)) - return getIntegerSCEV(0, V->getType()); + return getConstant(V->getType(), 0); else if (GlobalAlias *GA = dyn_cast(V)) return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); else @@ -2965,15 +3219,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { Operator *U = cast(V); switch (Opcode) { case Instruction::Add: - // Don't transfer the NSW and NUW bits from the Add instruction to the - // Add expression, because the Instruction may be guarded by control - // flow and the no-overflow bits may not be valid for the expression in - // any context. return getAddExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); case Instruction::Mul: - // Don't transfer the NSW and NUW bits from the Mul instruction to the - // Mul expression, as with Add. return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); case Instruction::UDiv: @@ -3066,7 +3314,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { const Type *Z0Ty = Z0->getType(); unsigned Z0TySize = getTypeSizeInBits(Z0Ty); - // If C is a low-bits mask, the zero extend is zerving to + // If C is a low-bits mask, the zero extend is serving to // mask off the high bits. Complement the operand and // re-apply the zext. if (APIntOps::isMask(Z0TySize, CI->getValue())) @@ -3087,9 +3335,17 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { case Instruction::Shl: // Turn shift left of a constant amount into a multiply. if (ConstantInt *SA = dyn_cast(U->getOperand(1))) { - uint32_t BitWidth = cast(V->getType())->getBitWidth(); + uint32_t BitWidth = cast(U->getType())->getBitWidth(); + + // If the shift count is not less than the bitwidth, the result of + // the shift is undefined. Don't try to analyze it, because the + // resolution chosen here may differ from the resolution chosen in + // other parts of the compiler. + if (SA->getValue().uge(BitWidth)) + break; + Constant *X = ConstantInt::get(getContext(), - APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); + APInt(BitWidth, 1).shl(SA->getZExtValue())); return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X)); } break; @@ -3097,9 +3353,17 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { case Instruction::LShr: // Turn logical shift right of a constant into a unsigned divide. if (ConstantInt *SA = dyn_cast(U->getOperand(1))) { - uint32_t BitWidth = cast(V->getType())->getBitWidth(); + uint32_t BitWidth = cast(U->getType())->getBitWidth(); + + // If the shift count is not less than the bitwidth, the result of + // the shift is undefined. Don't try to analyze it, because the + // resolution chosen here may differ from the resolution chosen in + // other parts of the compiler. 
+ if (SA->getValue().uge(BitWidth)) + break; + Constant *X = ConstantInt::get(getContext(), - APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); + APInt(BitWidth, 1).shl(SA->getZExtValue())); return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X)); } break; @@ -3107,19 +3371,26 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { case Instruction::AShr: // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. if (ConstantInt *CI = dyn_cast(U->getOperand(1))) - if (Instruction *L = dyn_cast(U->getOperand(0))) + if (Operator *L = dyn_cast(U->getOperand(0))) if (L->getOpcode() == Instruction::Shl && L->getOperand(1) == U->getOperand(1)) { - unsigned BitWidth = getTypeSizeInBits(U->getType()); + uint64_t BitWidth = getTypeSizeInBits(U->getType()); + + // If the shift count is not less than the bitwidth, the result of + // the shift is undefined. Don't try to analyze it, because the + // resolution chosen here may differ from the resolution chosen in + // other parts of the compiler. + if (CI->getValue().uge(BitWidth)) + break; + uint64_t Amt = BitWidth - CI->getZExtValue(); if (Amt == BitWidth) return getSCEV(L->getOperand(0)); // shift by zero --> noop - if (Amt > BitWidth) - return getIntegerSCEV(0, U->getType()); // value is undefined return getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), - IntegerType::get(getContext(), Amt)), - U->getType()); + IntegerType::get(getContext(), + Amt)), + U->getType()); } break; @@ -3138,10 +3409,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { return getSCEV(U->getOperand(0)); break; - // It's tempting to handle inttoptr and ptrtoint, however this can - // lead to pointer expressions which cannot be expanded to GEPs - // (because they may overflow). For now, the only pointer-typed - // expressions we handle are GEPs and address literals. + // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can + // lead to pointer expressions which cannot safely be expanded to GEPs, + // because ScalarEvolution doesn't respect the GEP aliasing rules when + // simplifying integer expressions. case Instruction::GetElementPtr: return createNodeForGEP(cast(U)); @@ -3162,10 +3433,22 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { // fall through case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: - if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) - return getSMaxExpr(getSCEV(LHS), getSCEV(RHS)); - else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) - return getSMinExpr(getSCEV(LHS), getSCEV(RHS)); + // a >s b ? a+x : b+x -> smax(a, b)+x + // a >s b ? b+x : a+x -> smin(a, b)+x + if (LHS->getType() == U->getType()) { + const SCEV *LS = getSCEV(LHS); + const SCEV *RS = getSCEV(RHS); + const SCEV *LA = getSCEV(U->getOperand(1)); + const SCEV *RA = getSCEV(U->getOperand(2)); + const SCEV *LDiff = getMinusSCEV(LA, LS); + const SCEV *RDiff = getMinusSCEV(RA, RS); + if (LDiff == RDiff) + return getAddExpr(getSMaxExpr(LS, RS), LDiff); + LDiff = getMinusSCEV(LA, RS); + RDiff = getMinusSCEV(RA, LS); + if (LDiff == RDiff) + return getAddExpr(getSMinExpr(LS, RS), LDiff); + } break; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: @@ -3173,28 +3456,52 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { // fall through case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: - if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) - return getUMaxExpr(getSCEV(LHS), getSCEV(RHS)); - else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) - return getUMinExpr(getSCEV(LHS), getSCEV(RHS)); + // a >u b ? 
a+x : b+x -> umax(a, b)+x + // a >u b ? b+x : a+x -> umin(a, b)+x + if (LHS->getType() == U->getType()) { + const SCEV *LS = getSCEV(LHS); + const SCEV *RS = getSCEV(RHS); + const SCEV *LA = getSCEV(U->getOperand(1)); + const SCEV *RA = getSCEV(U->getOperand(2)); + const SCEV *LDiff = getMinusSCEV(LA, LS); + const SCEV *RDiff = getMinusSCEV(RA, RS); + if (LDiff == RDiff) + return getAddExpr(getUMaxExpr(LS, RS), LDiff); + LDiff = getMinusSCEV(LA, RS); + RDiff = getMinusSCEV(RA, LS); + if (LDiff == RDiff) + return getAddExpr(getUMinExpr(LS, RS), LDiff); + } break; case ICmpInst::ICMP_NE: - // n != 0 ? n : 1 -> umax(n, 1) - if (LHS == U->getOperand(1) && - isa(U->getOperand(2)) && - cast(U->getOperand(2))->isOne() && + // n != 0 ? n+x : 1+x -> umax(n, 1)+x + if (LHS->getType() == U->getType() && isa(RHS) && - cast(RHS)->isZero()) - return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2))); + cast(RHS)->isZero()) { + const SCEV *One = getConstant(LHS->getType(), 1); + const SCEV *LS = getSCEV(LHS); + const SCEV *LA = getSCEV(U->getOperand(1)); + const SCEV *RA = getSCEV(U->getOperand(2)); + const SCEV *LDiff = getMinusSCEV(LA, LS); + const SCEV *RDiff = getMinusSCEV(RA, One); + if (LDiff == RDiff) + return getAddExpr(getUMaxExpr(LS, One), LDiff); + } break; case ICmpInst::ICMP_EQ: - // n == 0 ? 1 : n -> umax(n, 1) - if (LHS == U->getOperand(2) && - isa(U->getOperand(1)) && - cast(U->getOperand(1))->isOne() && + // n == 0 ? 1+x : n+x -> umax(n, 1)+x + if (LHS->getType() == U->getType() && isa(RHS) && - cast(RHS)->isZero()) - return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1))); + cast(RHS)->isZero()) { + const SCEV *One = getConstant(LHS->getType(), 1); + const SCEV *LS = getSCEV(LHS); + const SCEV *LA = getSCEV(U->getOperand(1)); + const SCEV *RA = getSCEV(U->getOperand(2)); + const SCEV *LDiff = getMinusSCEV(LA, One); + const SCEV *RDiff = getMinusSCEV(RA, LS); + if (LDiff == RDiff) + return getAddExpr(getUMaxExpr(LS, One), LDiff); + } break; default: break; @@ -3251,26 +3558,26 @@ PushLoopPHIs(const Loop *L, SmallVectorImpl &Worklist) { const ScalarEvolution::BackedgeTakenInfo & ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { // Initially insert a CouldNotCompute for this loop. If the insertion - // succeeds, procede to actually compute a backedge-taken count and + // succeeds, proceed to actually compute a backedge-taken count and // update the value. The temporary CouldNotCompute value tells SCEV // code elsewhere that it shouldn't attempt to request a new // backedge-taken count, which could result in infinite recursion. std::pair::iterator, bool> Pair = BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); if (Pair.second) { - BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L); - if (ItCount.Exact != getCouldNotCompute()) { - assert(ItCount.Exact->isLoopInvariant(L) && - ItCount.Max->isLoopInvariant(L) && - "Computed trip count isn't loop invariant for loop!"); + BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L); + if (BECount.Exact != getCouldNotCompute()) { + assert(BECount.Exact->isLoopInvariant(L) && + BECount.Max->isLoopInvariant(L) && + "Computed backedge-taken count isn't loop invariant for loop!"); ++NumTripCountsComputed; // Update the value in the map. - Pair.first->second = ItCount; + Pair.first->second = BECount; } else { - if (ItCount.Max != getCouldNotCompute()) + if (BECount.Max != getCouldNotCompute()) // Update the value in the map. 
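Stepping back to the select rewrites earlier in createSCEV: all four forms (smax, smin, umax, umin) rest on one identity, namely that when both select arms differ from the compared operands by a common offset x, the select equals the corresponding min/max plus x. An exhaustive check over small signed values (a sketch):

#include <algorithm>
#include <cassert>

int main() {
  for (int a = -3; a <= 3; ++a)
    for (int b = -3; b <= 3; ++b)
      for (int x = -3; x <= 3; ++x) {
        assert(((a > b) ? a + x : b + x) == std::max(a, b) + x);  // smax form
        assert(((a > b) ? b + x : a + x) == std::min(a, b) + x);  // smin form
      }
  return 0;
}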
- Pair.first->second = ItCount; + Pair.first->second = BECount; if (isa(L->getHeader()->begin())) // Only count loops that have phi nodes as not being computable. ++NumTripCountsNotComputed; @@ -3281,7 +3588,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { // conservative estimates made without the benefit of trip count // information. This is similar to the code in forgetLoop, except that // it handles SCEVUnknown PHI nodes specially. - if (ItCount.hasAnyInfo()) { + if (BECount.hasAnyInfo()) { SmallVector Worklist; PushLoopPHIs(L, Worklist); @@ -3343,6 +3650,55 @@ void ScalarEvolution::forgetLoop(const Loop *L) { } } +/// forgetValue - This method should be called by the client when it has +/// changed a value in a way that may effect its value, or which may +/// disconnect it from a def-use chain linking it to a loop. +void ScalarEvolution::forgetValue(Value *V) { + Instruction *I = dyn_cast(V); + if (!I) return; + + // Drop information about expressions based on loop-header PHIs. + SmallVector Worklist; + Worklist.push_back(I); + + SmallPtrSet Visited; + while (!Worklist.empty()) { + I = Worklist.pop_back_val(); + if (!Visited.insert(I)) continue; + + std::map::iterator It = + Scalars.find(static_cast(I)); + if (It != Scalars.end()) { + ValuesAtScopes.erase(It->second); + Scalars.erase(It); + if (PHINode *PN = dyn_cast(I)) + ConstantEvolutionLoopExitValue.erase(PN); + } + + // If there's a SCEVUnknown tying this value into the SCEV + // space, remove it from the folding set map. The SCEVUnknown + // object and any other SCEV objects which reference it + // (transitively) remain allocated, effectively leaked until + // the underlying BumpPtrAllocator is freed. + // + // This permits SCEV pointers to be used as keys in maps + // such as the ValuesAtScopes map. + FoldingSetNodeID ID; + ID.AddInteger(scUnknown); + ID.AddPointer(I); + void *IP; + if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { + UniqueSCEVs.RemoveNode(S); + + // This isn't necessary, but we might as well remove the + // value from the ValuesAtScopes map too. + ValuesAtScopes.erase(S); + } + + PushDefUseChildren(I, Worklist); + } +} + /// ComputeBackedgeTakenCount - Compute the number of times the backedge /// of the specified loop will execute. ScalarEvolution::BackedgeTakenInfo @@ -3439,7 +3795,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, return getCouldNotCompute(); } - // Procede to the next level to examine the exit condition expression. + // Proceed to the next level to examine the exit condition expression. return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), ExitBr->getSuccessor(0), ExitBr->getSuccessor(1)); @@ -3528,10 +3884,23 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, } // With an icmp, it may be feasible to compute an exact backedge-taken count. - // Procede to the next level to examine the icmp. + // Proceed to the next level to examine the icmp. if (ICmpInst *ExitCondICmp = dyn_cast(ExitCond)) return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); + // Check for a constant condition. These are normally stripped out by + // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to + // preserve the CFG and is temporarily leaving constant conditions + // in place. + if (ConstantInt *CI = dyn_cast(ExitCond)) { + if (L->contains(FBB) == !CI->getZExtValue()) + // The backedge is always taken. + return getCouldNotCompute(); + else + // The backedge is never taken. 
+ return getConstant(CI->getType(), 0); + } + // If it's not an integer or pointer comparison then compute it the hard way. return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); } @@ -3555,14 +3924,10 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, // Handle common loops like: for (X = "string"; *X; ++X) if (LoadInst *LI = dyn_cast(ExitCond->getOperand(0))) if (Constant *RHS = dyn_cast(ExitCond->getOperand(1))) { - const SCEV *ItCnt = + BackedgeTakenInfo ItCnt = ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); - if (!isa(ItCnt)) { - unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); - return BackedgeTakenInfo(ItCnt, - isa(ItCnt) ? ItCnt : - getConstant(APInt::getMaxValue(BitWidth)-1)); - } + if (ItCnt.hasAnyInfo()) + return ItCnt; } const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); @@ -3580,6 +3945,9 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, Cond = ICmpInst::getSwappedPredicate(Cond); } + // Simplify the operands before analyzing them. + (void)SimplifyICmpOperands(Cond, LHS, RHS); + // If we have a comparison of a chrec against a constant, try to use value // ranges to answer this query. if (const SCEVConstant *RHSC = dyn_cast(RHS)) @@ -3596,14 +3964,14 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, switch (Cond) { case ICmpInst::ICMP_NE: { // while (X != Y) // Convert to: while (X-Y != 0) - const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); - if (!isa(TC)) return TC; + BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L); + if (BTI.hasAnyInfo()) return BTI; break; } case ICmpInst::ICMP_EQ: { // while (X == Y) // Convert to: while (X-Y == 0) - const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); - if (!isa(TC)) return TC; + BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); + if (BTI.hasAnyInfo()) return BTI; break; } case ICmpInst::ICMP_SLT: { @@ -3690,7 +4058,7 @@ GetAddressedElementFromGlobal(GlobalVariable *GV, /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of /// 'icmp op load X, cst', try to see if we can compute the backedge /// execution count. -const SCEV * +ScalarEvolution::BackedgeTakenInfo ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( LoadInst *LI, Constant *RHS, @@ -3699,6 +4067,7 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( if (LI->isVolatile()) return getCouldNotCompute(); // Check to see if the loaded pointer is a getelementptr of a global. + // TODO: Use SCEV instead of manually grubbing with GEPs. GetElementPtrInst *GEP = dyn_cast(LI->getOperand(0)); if (!GEP) return getCouldNotCompute(); @@ -3807,8 +4176,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { // constant or derived from a PHI node themselves. 
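For the ICMP_NE case above, "while (X != Y)" becomes HowFarToZero(X - Y): when the difference is an affine recurrence {D,+,S} and S evenly divides -D, the backedge executes -D/S times. A concrete check of that closed form (assumed values):

#include <cassert>

int main() {
  int D = 12, S = -4;  // X - Y evolves as {12,+,-4}
  int Trips = 0;
  for (int V = D; V != 0; V += S)
    ++Trips;  // count backedge executions
  assert(Trips == 3 && Trips == -D / S);
  return 0;
}

When no closed form applies, the getConstantEvolvingPHI machinery below supports the brute-force fallback instead.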
PHINode *PHI = 0; for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) - if (!(isa(I->getOperand(Op)) || - isa(I->getOperand(Op)))) { + if (!isa(I->getOperand(Op))) { PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); if (P == 0) return 0; // Not evolving from PHI if (PHI == 0) @@ -3829,11 +4197,9 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal, const TargetData *TD) { if (isa(V)) return PHIVal; if (Constant *C = dyn_cast(V)) return C; - if (GlobalValue *GV = dyn_cast(V)) return GV; Instruction *I = cast(V); - std::vector Operands; - Operands.resize(I->getNumOperands()); + std::vector Operands(I->getNumOperands()); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD); @@ -3860,7 +4226,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, if (I != ConstantEvolutionLoopExitValue.end()) return I->second; - if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations))) + if (BEs.ugt(MaxBruteForceIterations)) return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; @@ -3875,8 +4241,8 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, return RetVal = 0; // Must be a constant. Value *BEValue = PN->getIncomingValue(SecondIsBackedge); - PHINode *PN2 = getConstantEvolvingPHI(BEValue, L); - if (PN2 != PN) + if (getConstantEvolvingPHI(BEValue, L) != PN && + !isa(BEValue)) return RetVal = 0; // Not derived from same PHI. // Execute the loop symbolically to determine the exit value. @@ -3911,8 +4277,11 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, PHINode *PN = getConstantEvolvingPHI(Cond, L); if (PN == 0) return getCouldNotCompute(); - // Since the loop is canonicalized, the PHI node must have two entries. One - // entry must be a constant (coming in from outside of the loop), and the + // If the loop is canonicalized, the PHI will have exactly two entries. + // That's the only form we support here. + if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); + + // One entry must be a constant (coming in from outside of the loop), and the // second must be derived from the same PHI. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); Constant *StartCST = @@ -3920,8 +4289,9 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, if (StartCST == 0) return getCouldNotCompute(); // Must be a constant. Value *BEValue = PN->getIncomingValue(SecondIsBackedge); - PHINode *PN2 = getConstantEvolvingPHI(BEValue, L); - if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI. + if (getConstantEvolvingPHI(BEValue, L) != PN && + !isa(BEValue)) + return getCouldNotCompute(); // Not derived from same PHI. // Okay, we find a PHI node that defines the trip count of this loop. Execute // the loop symbolically to determine when the condition gets a value of @@ -4009,53 +4379,51 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { // the arguments into constants, and if so, try to constant propagate the // result. This is particularly useful for computing loop exit values. 
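The exhaustive path above (getConstantEvolvingPHI, EvaluateExpression, getConstantEvolutionLoopExitValue) amounts to running the constant-evolving PHI forward one backedge at a time, constant-folding the exit condition at each step, up to MaxBruteForceIterations. The strategy in miniature (a sketch, not the LLVM code):

#include <cassert>

int main() {
  int Phi = 0;          // start value, incoming from outside the loop
  unsigned BETaken = 0;
  while (Phi * Phi != 25) {  // exit condition, foldable once Phi is known
    ++Phi;              // backedge value, derived only from the PHI
    ++BETaken;
  }
  assert(BETaken == 5); // the brute-force backedge-taken count
  return 0;
}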
if (CanConstantFold(I)) { - std::vector Operands; - Operands.reserve(I->getNumOperands()); + SmallVector Operands; + bool MadeImprovement = false; for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Value *Op = I->getOperand(i); if (Constant *C = dyn_cast(Op)) { Operands.push_back(C); - } else { - // If any of the operands is non-constant and if they are - // non-integer and non-pointer, don't even try to analyze them - // with scev techniques. - if (!isSCEVable(Op->getType())) - return V; - - const SCEV *OpV = getSCEVAtScope(Op, L); - if (const SCEVConstant *SC = dyn_cast(OpV)) { - Constant *C = SC->getValue(); - if (C->getType() != Op->getType()) - C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, - Op->getType(), - false), - C, Op->getType()); - Operands.push_back(C); - } else if (const SCEVUnknown *SU = dyn_cast(OpV)) { - if (Constant *C = dyn_cast(SU->getValue())) { - if (C->getType() != Op->getType()) - C = - ConstantExpr::getCast(CastInst::getCastOpcode(C, false, - Op->getType(), - false), - C, Op->getType()); - Operands.push_back(C); - } else - return V; - } else { - return V; - } + continue; } + + // If any of the operands is non-constant and if they are + // non-integer and non-pointer, don't even try to analyze them + // with scev techniques. + if (!isSCEVable(Op->getType())) + return V; + + const SCEV *OrigV = getSCEV(Op); + const SCEV *OpV = getSCEVAtScope(OrigV, L); + MadeImprovement |= OrigV != OpV; + + Constant *C = 0; + if (const SCEVConstant *SC = dyn_cast(OpV)) + C = SC->getValue(); + if (const SCEVUnknown *SU = dyn_cast(OpV)) + C = dyn_cast(SU->getValue()); + if (!C) return V; + if (C->getType() != Op->getType()) + C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, + Op->getType(), + false), + C, Op->getType()); + Operands.push_back(C); } - Constant *C; - if (const CmpInst *CI = dyn_cast(I)) - C = ConstantFoldCompareInstOperands(CI->getPredicate(), - Operands[0], Operands[1], TD); - else - C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), - &Operands[0], Operands.size(), TD); - return getSCEV(C); + // Check to see if getSCEVAtScope actually made an improvement. + if (MadeImprovement) { + Constant *C = 0; + if (const CmpInst *CI = dyn_cast(I)) + C = ConstantFoldCompareInstOperands(CI->getPredicate(), + Operands[0], Operands[1], TD); + else + C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), + &Operands[0], Operands.size(), TD); + if (!C) return V; + return getSCEV(C); + } } } @@ -4105,15 +4473,38 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { // If this is a loop recurrence for a loop that does not contain L, then we // are dealing with the final value computed by the loop. if (const SCEVAddRecExpr *AddRec = dyn_cast(V)) { - if (!L || !AddRec->getLoop()->contains(L)) { - // To evaluate this recurrence, we need to know how many times the AddRec - // loop iterates. Compute this now. - const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); - if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; + // First, attempt to evaluate each operand. + // Avoid performing the look-up in the common case where the specified + // expression has no loop-variant portions. + for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { + const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); + if (OpAtScope == AddRec->getOperand(i)) + continue; + + // Okay, at least one of these operands is loop variant but might be + // foldable. 
foldable. Build a new instance of the folded commutative expression. + SmallVector NewOps(AddRec->op_begin(), + AddRec->op_begin()+i); + NewOps.push_back(OpAtScope); + for (++i; i != e; ++i) + NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); + + AddRec = cast(getAddRecExpr(NewOps, AddRec->getLoop())); + break; + } + + // If the scope is outside the addrec's loop, evaluate it by using the + // loop exit value of the addrec. + if (!AddRec->getLoop()->contains(L)) { + // To evaluate this recurrence, we need to know how many times the AddRec + // loop iterates. Compute this now. + const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); + if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; // Then, evaluate the AddRec. return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); } + return AddRec; } @@ -4138,9 +4529,6 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { return getTruncateExpr(Op, Cast->getType()); } - if (isa(V)) - return V; - llvm_unreachable("Unknown SCEV type!"); return 0; } @@ -4266,7 +4654,8 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { /// HowFarToZero - Return the number of times a backedge comparing the specified /// value to zero will execute. If not computable, return CouldNotCompute. -const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { +ScalarEvolution::BackedgeTakenInfo +ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // If the value is a constant if (const SCEVConstant *C = dyn_cast(V)) { // If the value is already zero, the branch will execute zero times. @@ -4311,7 +4700,7 @@ const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { -StartC->getValue()->getValue(), *this); } - } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { + } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of // the quadratic equation to solve it. std::pair Roots = SolveQuadraticEquation(AddRec, @@ -4346,7 +4735,8 @@ const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { /// HowFarToNonZero - Return the number of times a backedge checking the /// specified value for nonzero will execute. If not computable, return /// CouldNotCompute -const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { +ScalarEvolution::BackedgeTakenInfo +ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { // Loops that look like: while (X == 0) are very strange indeed. We don't // handle them yet except for the trivial case. This could be expanded in the // future as needed. @@ -4355,7 +4745,7 @@ const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { // already. If so, the backedge will execute zero times. if (const SCEVConstant *C = dyn_cast(V)) { if (!C->getValue()->isNullValue()) - return getIntegerSCEV(0, C->getType()); + return getConstant(C->getType(), 0); return getCouldNotCompute(); // Otherwise it will loop infinitely. } @@ -4364,41 +4754,26 @@ const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { return getCouldNotCompute(); } -/// getLoopPredecessor - If the given loop's header has exactly one unique -/// predecessor outside the loop, return it. Otherwise return null.
-/// -BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) { - BasicBlock *Header = L->getHeader(); - BasicBlock *Pred = 0; - for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); - PI != E; ++PI) - if (!L->contains(*PI)) { - if (Pred && Pred != *PI) return 0; // Multiple predecessors. - Pred = *PI; - } - return Pred; -} - /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB /// (which may not be an immediate predecessor) which has exactly one /// successor from which BB is reachable, or null if no such block is /// found. /// -BasicBlock * +std::pair ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { // If the block has a unique predecessor, then there is no path from the // predecessor to the block that does not go through the direct edge // from the predecessor to the block. if (BasicBlock *Pred = BB->getSinglePredecessor()) - return Pred; + return std::make_pair(Pred, BB); // A loop's header is defined to be a block that dominates the loop. // If the header has a unique predecessor outside the loop, it must be // a block that has exactly one successor that can reach the loop. if (Loop *L = LI->getLoopFor(BB)) - return getLoopPredecessor(L); + return std::make_pair(L->getLoopPredecessor(), L->getHeader()); - return 0; + return std::pair(); } /// HasSameValue - SCEV structural equivalence is usually sufficient for @@ -4424,6 +4799,266 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) { return false; } +/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with +/// predicate Pred. Return true iff any changes were made. +/// +bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, + const SCEV *&LHS, const SCEV *&RHS) { + bool Changed = false; + + // Canonicalize a constant to the right side. + if (const SCEVConstant *LHSC = dyn_cast(LHS)) { + // Check for both operands constant. + if (const SCEVConstant *RHSC = dyn_cast(RHS)) { + if (ConstantExpr::getICmp(Pred, + LHSC->getValue(), + RHSC->getValue())->isNullValue()) + goto trivially_false; + else + goto trivially_true; + } + // Otherwise swap the operands to put the constant on the right. + std::swap(LHS, RHS); + Pred = ICmpInst::getSwappedPredicate(Pred); + Changed = true; + } + + // If we're comparing an addrec with a value which is loop-invariant in the + // addrec's loop, put the addrec on the left. Also make a dominance check, + // as both operands could be addrecs loop-invariant in each other's loop. + if (const SCEVAddRecExpr *AR = dyn_cast(RHS)) { + const Loop *L = AR->getLoop(); + if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) { + std::swap(LHS, RHS); + Pred = ICmpInst::getSwappedPredicate(Pred); + Changed = true; + } + } + + // If there's a constant operand, canonicalize comparisons with boundary + // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
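Two of the boundary identities encoded by the switch that follows, checked exhaustively over all 8-bit values (a sketch): for unsigned x, x >= 1 holds iff x != 0 (the ICMP_UGE case where RA - 1 is the minimum), and x <= 254 holds iff x != 255 (the ICMP_ULE case where RA + 1 is the maximum).

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned v = 0; v <= 0xFF; ++v) {
    uint8_t x = (uint8_t)v;
    assert((x >= 1) == (x != 0));      // ICMP_UGE with (RA-1).isMinValue()
    assert((x <= 254) == (x != 255));  // ICMP_ULE with (RA+1).isMaxValue()
  }
  return 0;
}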
+ if (const SCEVConstant *RC = dyn_cast(RHS)) { + const APInt &RA = RC->getValue()->getValue(); + switch (Pred) { + default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); + case ICmpInst::ICMP_EQ: + case ICmpInst::ICMP_NE: + break; + case ICmpInst::ICMP_UGE: + if ((RA - 1).isMinValue()) { + Pred = ICmpInst::ICMP_NE; + RHS = getConstant(RA - 1); + Changed = true; + break; + } + if (RA.isMaxValue()) { + Pred = ICmpInst::ICMP_EQ; + Changed = true; + break; + } + if (RA.isMinValue()) goto trivially_true; + + Pred = ICmpInst::ICMP_UGT; + RHS = getConstant(RA - 1); + Changed = true; + break; + case ICmpInst::ICMP_ULE: + if ((RA + 1).isMaxValue()) { + Pred = ICmpInst::ICMP_NE; + RHS = getConstant(RA + 1); + Changed = true; + break; + } + if (RA.isMinValue()) { + Pred = ICmpInst::ICMP_EQ; + Changed = true; + break; + } + if (RA.isMaxValue()) goto trivially_true; + + Pred = ICmpInst::ICMP_ULT; + RHS = getConstant(RA + 1); + Changed = true; + break; + case ICmpInst::ICMP_SGE: + if ((RA - 1).isMinSignedValue()) { + Pred = ICmpInst::ICMP_NE; + RHS = getConstant(RA - 1); + Changed = true; + break; + } + if (RA.isMaxSignedValue()) { + Pred = ICmpInst::ICMP_EQ; + Changed = true; + break; + } + if (RA.isMinSignedValue()) goto trivially_true; + + Pred = ICmpInst::ICMP_SGT; + RHS = getConstant(RA - 1); + Changed = true; + break; + case ICmpInst::ICMP_SLE: + if ((RA + 1).isMaxSignedValue()) { + Pred = ICmpInst::ICMP_NE; + RHS = getConstant(RA + 1); + Changed = true; + break; + } + if (RA.isMinSignedValue()) { + Pred = ICmpInst::ICMP_EQ; + Changed = true; + break; + } + if (RA.isMaxSignedValue()) goto trivially_true; + + Pred = ICmpInst::ICMP_SLT; + RHS = getConstant(RA + 1); + Changed = true; + break; + case ICmpInst::ICMP_UGT: + if (RA.isMinValue()) { + Pred = ICmpInst::ICMP_NE; + Changed = true; + break; + } + if ((RA + 1).isMaxValue()) { + Pred = ICmpInst::ICMP_EQ; + RHS = getConstant(RA + 1); + Changed = true; + break; + } + if (RA.isMaxValue()) goto trivially_false; + break; + case ICmpInst::ICMP_ULT: + if (RA.isMaxValue()) { + Pred = ICmpInst::ICMP_NE; + Changed = true; + break; + } + if ((RA - 1).isMinValue()) { + Pred = ICmpInst::ICMP_EQ; + RHS = getConstant(RA - 1); + Changed = true; + break; + } + if (RA.isMinValue()) goto trivially_false; + break; + case ICmpInst::ICMP_SGT: + if (RA.isMinSignedValue()) { + Pred = ICmpInst::ICMP_NE; + Changed = true; + break; + } + if ((RA + 1).isMaxSignedValue()) { + Pred = ICmpInst::ICMP_EQ; + RHS = getConstant(RA + 1); + Changed = true; + break; + } + if (RA.isMaxSignedValue()) goto trivially_false; + break; + case ICmpInst::ICMP_SLT: + if (RA.isMaxSignedValue()) { + Pred = ICmpInst::ICMP_NE; + Changed = true; + break; + } + if ((RA - 1).isMinSignedValue()) { + Pred = ICmpInst::ICMP_EQ; + RHS = getConstant(RA - 1); + Changed = true; + break; + } + if (RA.isMinSignedValue()) goto trivially_false; + break; + } + } + + // Check for obvious equality. + if (HasSameValue(LHS, RHS)) { + if (ICmpInst::isTrueWhenEqual(Pred)) + goto trivially_true; + if (ICmpInst::isFalseWhenEqual(Pred)) + goto trivially_false; + } + + // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by + // adding or subtracting 1 from one of the operands. 
+ switch (Pred) { + case ICmpInst::ICMP_SLE: + if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { + RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, + /*HasNUW=*/false, /*HasNSW=*/true); + Pred = ICmpInst::ICMP_SLT; + Changed = true; + } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { + LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, + /*HasNUW=*/false, /*HasNSW=*/true); + Pred = ICmpInst::ICMP_SLT; + Changed = true; + } + break; + case ICmpInst::ICMP_SGE: + if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { + RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, + /*HasNUW=*/false, /*HasNSW=*/true); + Pred = ICmpInst::ICMP_SGT; + Changed = true; + } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { + LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, + /*HasNUW=*/false, /*HasNSW=*/true); + Pred = ICmpInst::ICMP_SGT; + Changed = true; + } + break; + case ICmpInst::ICMP_ULE: + if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { + RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, + /*HasNUW=*/true, /*HasNSW=*/false); + Pred = ICmpInst::ICMP_ULT; + Changed = true; + } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { + LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, + /*HasNUW=*/true, /*HasNSW=*/false); + Pred = ICmpInst::ICMP_ULT; + Changed = true; + } + break; + case ICmpInst::ICMP_UGE: + if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { + RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, + /*HasNUW=*/true, /*HasNSW=*/false); + Pred = ICmpInst::ICMP_UGT; + Changed = true; + } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { + LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, + /*HasNUW=*/true, /*HasNSW=*/false); + Pred = ICmpInst::ICMP_UGT; + Changed = true; + } + break; + default: + break; + } + + // TODO: More simplifications are possible here. + + return Changed; + +trivially_true: + // Return 0 == 0. + LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); + Pred = ICmpInst::ICMP_EQ; + return true; + +trivially_false: + // Return 0 != 0. + LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); + Pred = ICmpInst::ICMP_NE; + return true; +} + bool ScalarEvolution::isKnownNegative(const SCEV *S) { return getSignedRange(S).getSignedMax().isNegative(); } @@ -4446,10 +5081,36 @@ bool ScalarEvolution::isKnownNonZero(const SCEV *S) { bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { + // Canonicalize the inputs first. + (void)SimplifyICmpOperands(Pred, LHS, RHS); + + // If LHS or RHS is an addrec, check to see if the condition is true in + // every iteration of the loop. + if (const SCEVAddRecExpr *AR = dyn_cast(LHS)) + if (isLoopEntryGuardedByCond( + AR->getLoop(), Pred, AR->getStart(), RHS) && + isLoopBackedgeGuardedByCond( + AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS)) + return true; + if (const SCEVAddRecExpr *AR = dyn_cast(RHS)) + if (isLoopEntryGuardedByCond( + AR->getLoop(), Pred, LHS, AR->getStart()) && + isLoopBackedgeGuardedByCond( + AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this))) + return true; + + // Otherwise see what can be done with known constant ranges. 
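The range guards in the switch above are what make these rewrites sound: x <=s y is equivalent to x <s y+1 only when y+1 cannot wrap, which is exactly what the isMaxSignedValue/isMaxValue checks rule out. A demonstration of the failure mode the guard prevents, with 8-bit values and two's-complement wrap assumed for the cast:

#include <cassert>
#include <cstdint>

int main() {
  int8_t x = 100, y = INT8_MAX;
  assert(x <= y);                  // x <=s y holds at the boundary...
  assert(!(x < (int8_t)(y + 1)));  // ...but y+1 wraps to -128, so x <s y+1 fails
  y = 50;                          // away from the boundary the rewrite is exact
  assert((x <= y) == (x < (int8_t)(y + 1)));
  return 0;
}

With the operands canonicalized, the remaining work is handed to the range-based check below.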
+  return isKnownPredicateWithRanges(Pred, LHS, RHS);
+}
+
+bool
+ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+                                            const SCEV *LHS, const SCEV *RHS) {
   if (HasSameValue(LHS, RHS))
     return ICmpInst::isTrueWhenEqual(Pred);
+  // This code is split out from isKnownPredicate because it is called from
+  // within isLoopEntryGuardedByCond.
   switch (Pred) {
   default:
     llvm_unreachable("Unexpected ICmpInst::Predicate value!");
@@ -4546,35 +5207,33 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                   LoopContinuePredicate->getSuccessor(0) != L->getHeader());
 }
 
-/// isLoopGuardedByCond - Test whether entry to the loop is protected
+/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
 /// by a conditional between LHS and RHS. This is used to help avoid max
 /// expressions in loop trip counts, and to eliminate casts.
 bool
-ScalarEvolution::isLoopGuardedByCond(const Loop *L,
-                                     ICmpInst::Predicate Pred,
-                                     const SCEV *LHS, const SCEV *RHS) {
+ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
+                                          ICmpInst::Predicate Pred,
+                                          const SCEV *LHS, const SCEV *RHS) {
   // Interpret a null as meaning no loop, where there is obviously no guard
   // (interprocedural conditions notwithstanding).
   if (!L) return false;
 
-  BasicBlock *Predecessor = getLoopPredecessor(L);
-  BasicBlock *PredecessorDest = L->getHeader();
-
   // Starting at the loop predecessor, climb up the predecessor chain, as long
   // as there are predecessors that can be found that have unique successors
   // leading to the original header.
-  for (; Predecessor;
-       PredecessorDest = Predecessor,
-       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
+  for (std::pair<BasicBlock *, BasicBlock *>
+         Pair(L->getLoopPredecessor(), L->getHeader());
+       Pair.first;
+       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
     BranchInst *LoopEntryPredicate =
-      dyn_cast<BranchInst>(Predecessor->getTerminator());
+      dyn_cast<BranchInst>(Pair.first->getTerminator());
     if (!LoopEntryPredicate ||
         LoopEntryPredicate->isUnconditional())
       continue;
     if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
-                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
+                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
       return true;
   }
 
@@ -4587,7 +5246,7 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
                                     ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS,
                                     bool Inverse) {
-  // Recursivly handle And and Or conditions.
+  // Recursively handle And and Or conditions.
   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
     if (BO->getOpcode() == Instruction::And) {
       if (!Inverse)
@@ -4638,117 +5297,12 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
 
   // Canonicalize the query to match the way instcombine will have
   // canonicalized the comparison.
-  // First, put a constant operand on the right.
-  if (isa<SCEVConstant>(LHS)) {
-    std::swap(LHS, RHS);
-    Pred = ICmpInst::getSwappedPredicate(Pred);
-  }
-  // Then, canonicalize comparisons with boundary cases.
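The first deleted lines above moved a constant left operand to the right by swapping the operands and reversing the predicate. That rewrite survives inside SimplifyICmpOperands; a standalone sketch (not LLVM code) of the idea, with illustrative names:

// Standalone sketch (not LLVM code) of the swap canonicalization: a constant
// on the left moves to the right by swapping operands and reversing the
// predicate, e.g. 5 <u x  ==>  x >u 5.
#include <cstdint>
#include <cassert>
#include <utility>

enum Pred { P_ULT, P_UGT };
static Pred swappedPredicate(Pred P) { return P == P_ULT ? P_UGT : P_ULT; }

int main() {
  uint32_t LHS = 5, RHS = 9;   // query: 5 <u 9, constant on the left
  Pred P = P_ULT;
  std::swap(LHS, RHS);         // put the constant on the right
  P = swappedPredicate(P);     // 9 >u 5, same truth value
  assert(P == P_UGT && LHS > RHS);
  return 0;
}

The hunk below deletes the rest of the old inline boundary-case switch, whose logic now lives in SimplifyICmpOperands with Changed-tracking added: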
-  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
-    const APInt &RA = RC->getValue()->getValue();
-    switch (Pred) {
-    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
-    case ICmpInst::ICMP_EQ:
-    case ICmpInst::ICMP_NE:
-      break;
-    case ICmpInst::ICMP_UGE:
-      if ((RA - 1).isMinValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        RHS = getConstant(RA - 1);
-        break;
-      }
-      if (RA.isMaxValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        break;
-      }
-      if (RA.isMinValue()) return true;
-      break;
-    case ICmpInst::ICMP_ULE:
-      if ((RA + 1).isMaxValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        RHS = getConstant(RA + 1);
-        break;
-      }
-      if (RA.isMinValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        break;
-      }
-      if (RA.isMaxValue()) return true;
-      break;
-    case ICmpInst::ICMP_SGE:
-      if ((RA - 1).isMinSignedValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        RHS = getConstant(RA - 1);
-        break;
-      }
-      if (RA.isMaxSignedValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        break;
-      }
-      if (RA.isMinSignedValue()) return true;
-      break;
-    case ICmpInst::ICMP_SLE:
-      if ((RA + 1).isMaxSignedValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        RHS = getConstant(RA + 1);
-        break;
-      }
-      if (RA.isMinSignedValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        break;
-      }
-      if (RA.isMaxSignedValue()) return true;
-      break;
-    case ICmpInst::ICMP_UGT:
-      if (RA.isMinValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        break;
-      }
-      if ((RA + 1).isMaxValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        RHS = getConstant(RA + 1);
-        break;
-      }
-      if (RA.isMaxValue()) return false;
-      break;
-    case ICmpInst::ICMP_ULT:
-      if (RA.isMaxValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        break;
-      }
-      if ((RA - 1).isMinValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        RHS = getConstant(RA - 1);
-        break;
-      }
-      if (RA.isMinValue()) return false;
-      break;
-    case ICmpInst::ICMP_SGT:
-      if (RA.isMinSignedValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        break;
-      }
-      if ((RA + 1).isMaxSignedValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        RHS = getConstant(RA + 1);
-        break;
-      }
-      if (RA.isMaxSignedValue()) return false;
-      break;
-    case ICmpInst::ICMP_SLT:
-      if (RA.isMaxSignedValue()) {
-        Pred = ICmpInst::ICMP_NE;
-        break;
-      }
-      if ((RA - 1).isMinSignedValue()) {
-        Pred = ICmpInst::ICMP_EQ;
-        RHS = getConstant(RA - 1);
-        break;
-      }
-      if (RA.isMinSignedValue()) return false;
-      break;
-    }
-  }
+  if (SimplifyICmpOperands(Pred, LHS, RHS))
+    if (LHS == RHS)
+      return CmpInst::isTrueWhenEqual(Pred);
+  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
+    if (FoundLHS == FoundRHS)
+      return CmpInst::isFalseWhenEqual(Pred);
 
   // Check to see if we can make the LHS or RHS match.
   if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -4790,7 +5344,7 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
 }
 
 /// isImpliedCondOperands - Test whether the condition described by Pred,
-/// LHS, and RHS is true whenever the condition desribed by Pred, FoundLHS,
+/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
 /// and FoundRHS is true.
 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
@@ -4805,7 +5359,7 @@ bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
 }
 
 /// isImpliedCondOperandsHelper - Test whether the condition described by
-/// Pred, LHS, and RHS is true whenever the condition desribed by Pred,
+/// Pred, LHS, and RHS is true whenever the condition described by Pred,
 /// FoundLHS, and FoundRHS is true.
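The helper below is a monotonicity argument: to show LHS pred RHS from a known FoundLHS pred FoundRHS, it is enough that the left side is no larger and the right side no smaller (for the less-than family; the greater-than family is mirrored). A standalone check (not LLVM code) of one instance:

// A standalone check (not LLVM code) of the monotonicity rule the helper
// below applies for the signed less-than family: given FoundLHS <s FoundRHS,
// knowing LHS <=s FoundLHS and RHS >=s FoundRHS is enough to conclude
// LHS <s RHS.
#include <cassert>

int main() {
  int FoundLHS = 3, FoundRHS = 7; // known from a dominating branch
  int LHS = 2, RHS = 9;           // ranges prove LHS <= FoundLHS, RHS >= FoundRHS
  assert(FoundLHS < FoundRHS);
  assert(LHS <= FoundLHS && RHS >= FoundRHS);
  assert(LHS < RHS);              // follows by transitivity
  return 0;
}

Each case of the switch instantiates this rule for one predicate family, using the range-only variant, presumably to avoid re-entering the guard analysis from which this helper is called.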
 bool
 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
@@ -4821,26 +5375,26 @@ ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
     break;
   case ICmpInst::ICMP_SLT:
   case ICmpInst::ICMP_SLE:
-    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
-        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
+    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
+        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
       return true;
     break;
   case ICmpInst::ICMP_SGT:
   case ICmpInst::ICMP_SGE:
-    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
-        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
+    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
+        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
   case ICmpInst::ICMP_ULT:
   case ICmpInst::ICMP_ULE:
-    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
-        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
+    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
+        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
       return true;
     break;
   case ICmpInst::ICMP_UGT:
   case ICmpInst::ICMP_UGE:
-    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
-        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
+    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
+        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
       return true;
     break;
   }
@@ -4855,8 +5409,11 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                         const SCEV *End,
                                         const SCEV *Step,
                                         bool NoWrap) {
+  assert(!isKnownNegative(Step) &&
+         "This code doesn't handle negative strides yet!");
+
   const Type *Ty = Start->getType();
-  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
+  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
   const SCEV *Diff = getMinusSCEV(End, Start);
   const SCEV *RoundUp = getAddExpr(Step, NegOne);
@@ -4897,39 +5454,35 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
       AddRec->hasNoUnsignedWrap();
 
   if (AddRec->isAffine()) {
-    // FORNOW: We only support unit strides.
     unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
     const SCEV *Step = AddRec->getStepRecurrence(*this);
-    // TODO: handle non-constant strides.
-    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
-    if (!CStep || CStep->isZero())
+    if (Step->isZero())
       return getCouldNotCompute();
-    if (CStep->isOne()) {
+    if (Step->isOne()) {
       // With unit stride, the iteration never steps past the limit value.
-    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
-      if (NoWrap) {
-        // We know the iteration won't step past the maximum value for its type.
-        ;
-      } else if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
-        // Test whether a positive iteration iteration can step past the limit
-        // value and past the maximum value for its type in a single step.
-        if (isSigned) {
-          APInt Max = APInt::getSignedMaxValue(BitWidth);
-          if ((Max - CStep->getValue()->getValue())
-              .slt(CLimit->getValue()->getValue()))
-            return getCouldNotCompute();
-        } else {
-          APInt Max = APInt::getMaxValue(BitWidth);
-          if ((Max - CStep->getValue()->getValue())
-              .ult(CLimit->getValue()->getValue()))
-            return getCouldNotCompute();
-        }
-      } else
-        // TODO: handle non-constant limit values below.
-        return getCouldNotCompute();
+    } else if (isKnownPositive(Step)) {
+      // Test whether a positive iteration can step past the limit
+      // value and past the maximum value for its type in a single step.
+ // Note that it's not sufficient to check NoWrap here, because even + // though the value after a wrap is undefined, it's not undefined + // behavior, so if wrap does occur, the loop could either terminate or + // loop infinitely, but in either case, the loop is guaranteed to + // iterate at least until the iteration where the wrapping occurs. + const SCEV *One = getConstant(Step->getType(), 1); + if (isSigned) { + APInt Max = APInt::getSignedMaxValue(BitWidth); + if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax()) + .slt(getSignedRange(RHS).getSignedMax())) + return getCouldNotCompute(); + } else { + APInt Max = APInt::getMaxValue(BitWidth); + if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax()) + .ult(getUnsignedRange(RHS).getUnsignedMax())) + return getCouldNotCompute(); + } } else - // TODO: handle negative strides below. + // TODO: Handle negative strides here and below. return getCouldNotCompute(); // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant @@ -4950,10 +5503,10 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // only know that it will execute (max(m,n)-n)/s times. In both cases, // the division must round up. const SCEV *End = RHS; - if (!isLoopGuardedByCond(L, - isSigned ? ICmpInst::ICMP_SLT : - ICmpInst::ICMP_ULT, - getMinusSCEV(Start, Step), RHS)) + if (!isLoopEntryGuardedByCond(L, + isSigned ? ICmpInst::ICMP_SLT : + ICmpInst::ICMP_ULT, + getMinusSCEV(Start, Step), RHS)) End = isSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); @@ -4962,6 +5515,20 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, getSignedRange(End).getSignedMax() : getUnsignedRange(End).getUnsignedMax()); + // If MaxEnd is within a step of the maximum integer value in its type, + // adjust it down to the minimum value which would produce the same effect. + // This allows the subsequent ceiling division of (N+(step-1))/step to + // compute the correct value. + const SCEV *StepMinusOne = getMinusSCEV(Step, + getConstant(Step->getType(), 1)); + MaxEnd = isSigned ? + getSMinExpr(MaxEnd, + getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)), + StepMinusOne)) : + getUMinExpr(MaxEnd, + getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)), + StepMinusOne)); + // Finally, we subtract these two values and divide, rounding up, to get // the number of times the backedge is executed. const SCEV *BECount = getBECount(Start, End, Step, NoWrap); @@ -4990,7 +5557,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, if (const SCEVConstant *SC = dyn_cast(getStart())) if (!SC->getValue()->isZero()) { SmallVector Operands(op_begin(), op_end()); - Operands[0] = SE.getIntegerSCEV(0, SC->getType()); + Operands[0] = SE.getConstant(SC->getType(), 0); const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); if (const SCEVAddRecExpr *ShiftedAddRec = dyn_cast(Shifted)) @@ -5014,7 +5581,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, // iteration exits. 
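The MaxEnd clamping added above protects the ceiling division in getBECount, which computes (End - Start + (Step - 1)) / Step. A standalone 8-bit model (not LLVM code) of the failure mode being avoided:

// A standalone 8-bit model (not LLVM code) of why MaxEnd is clamped above.
// The backedge count uses a ceiling division of the form
// (End - Start + (Step - 1)) / Step, and when End lies within Step-1 of the
// type's maximum, the addition wraps and the division yields nonsense.
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t Start = 0, Step = 4;
  const uint8_t End = 255;                  // at the 8-bit maximum
  uint8_t Wrapped = uint8_t(End - Start + (Step - 1)) / Step; // 258 wraps to 2
  const uint8_t Clamped = uint8_t(255 - (Step - 1));          // 252, adjusted bound
  uint8_t Safe = uint8_t(Clamped - Start + (Step - 1)) / Step; // 255/4 = 63
  std::printf("wrapped=%u safe=%u\n", unsigned(Wrapped), unsigned(Safe));
  return 0;
}

Back in getNumIterationsInRange, the remaining hunks are mechanical: getIntegerSCEV(0, Ty) becomes the equivalent getConstant(Ty, 0):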
   unsigned BitWidth = SE.getTypeSizeInBits(getType());
   if (!Range.contains(APInt(BitWidth, 0)))
-    return SE.getIntegerSCEV(0, getType());
+    return SE.getConstant(getType(), 0);
 
   if (isAffine()) {
     // If this is an affine expression then we have this situation:
@@ -5167,8 +5734,8 @@ ScalarEvolution::ScalarEvolution()
 bool ScalarEvolution::runOnFunction(Function &F) {
   this->F = &F;
   LI = &getAnalysis<LoopInfo>();
-  DT = &getAnalysis<DominatorTree>();
   TD = getAnalysisIfAvailable<TargetData>();
+  DT = &getAnalysis<DominatorTree>();
   return false;
 }
@@ -5227,7 +5794,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
 }
 
 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
-  // ScalarEvolution's implementaiton of the print method is to print
+  // ScalarEvolution's implementation of the print method is to print
   // out SCEV values of all instructions that are interesting. Doing
   // this potentially causes it to create new SCEV objects though,
   // which technically conflicts with the const qualifier. This isn't
@@ -5239,7 +5806,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
   WriteAsOperand(OS, F, /*PrintType=*/false);
   OS << "\n";
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
-    if (isSCEVable(I->getType())) {
+    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
       OS << *I << '\n';
       OS << "  --> ";
       const SCEV *SV = SE.getSCEV(&*I);
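The const_cast arrangement described in the print comment above is the usual logically-const caching pattern: the method promises observable constness while still being allowed to populate internal caches. A minimal standalone sketch (not LLVM code), using mutable where ScalarEvolution instead casts away const:

// Minimal standalone sketch (not LLVM code) of the logically-const caching
// pattern described in the print() comment above. ScalarEvolution casts away
// const on `this`; `mutable` achieves the same effect in this small example.
#include <iostream>
#include <map>
#include <string>

class Analysis {
  mutable std::map<int, std::string> Cache; // populated on demand
public:
  // Logically const: callers observe no change, but a query may fill the
  // cache, just as ScalarEvolution::print may create new SCEV objects.
  const std::string &describe(int V) const {
    auto It = Cache.find(V);
    if (It == Cache.end())
      It = Cache.insert(std::make_pair(V, std::to_string(V))).first;
    return It->second;
  }
};

int main() {
  const Analysis A;
  std::cout << A.describe(42) << "\n"; // first call computes and caches
  std::cout << A.describe(42) << "\n"; // second call hits the cache
  return 0;
}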