return;
}
+ // A simple case when N/1: the quotient is N and the remainder is zero.
+ if (Denominator->isOne()) {
+ *Quotient = Numerator;
+ *Remainder = D.Zero;
+ return;
+ }
+
// Split the Denominator when it is a product.
if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
const SCEV *Q, *R;
assert(Numerator->isAffine() && "Numerator should be affine");
divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
+ // Bail out if the types do not match.
+ Type *Ty = Denominator->getType();
+ if (Ty != StartQ->getType() || Ty != StartR->getType() ||
+ Ty != StepQ->getType() || Ty != StepR->getType()) {
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
+ }
Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
Numerator->getNoWrapFlags());
Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
// trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
- // eliminate all the truncates.
+ // eliminate all the truncates, or we can replace other casts with truncates.
if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
SmallVector<const SCEV *, 4> Operands;
bool hasTrunc = false;
for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
- hasTrunc = isa<SCEVTruncateExpr>(S);
+ if (!isa<SCEVCastExpr>(SA->getOperand(i)))
+ hasTrunc = isa<SCEVTruncateExpr>(S);
Operands.push_back(S);
}
if (!hasTrunc)
}
// trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
- // eliminate all the truncates.
+ // eliminate all the truncates, or we can replace other casts with truncates.
if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
SmallVector<const SCEV *, 4> Operands;
bool hasTrunc = false;
for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
- hasTrunc = isa<SCEVTruncateExpr>(S);
+ if (!isa<SCEVCastExpr>(SM->getOperand(i)))
+ hasTrunc = isa<SCEVTruncateExpr>(S);
Operands.push_back(S);
}
if (!hasTrunc)
return S;
}
+const SCEV *
+ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
+ const SmallVectorImpl<const SCEV *> &IndexExprs,
+ bool InBounds) {
+ // getSCEV(Base)->getType() has the same address space as Base->getType()
+ // because SCEV::getType() preserves the address space.
+ Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
+ // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
+ // instruction to its SCEV, because the Instruction may be guarded by control
+ // flow and the no-overflow bits may not be valid for the expression in any
+ // context. This can be fixed similarly to how these flags are handled for
+ // adds.
+ SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
+
+ const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
+ // The address space is unimportant. The first thing we do with CurTy is get
+ // its element type.
+ Type *CurTy = PointerType::getUnqual(PointeeType);
+ for (const SCEV *IndexExpr : IndexExprs) {
+ // Compute the (potentially symbolic) offset in bytes for this index.
+ if (StructType *STy = dyn_cast<StructType>(CurTy)) {
+ // For a struct, add the member offset.
+ ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
+ unsigned FieldNo = Index->getZExtValue();
+ const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
+
+ // Add the field offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, FieldOffset);
+
+ // Update CurTy to the type of the field at Index.
+ CurTy = STy->getTypeAtIndex(Index);
+ } else {
+ // Update CurTy to its element type.
+ CurTy = cast<SequentialType>(CurTy)->getElementType();
+ // For an array, add the element offset, explicitly scaled.
+ const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
+ // Getelementptr indices are signed.
+ IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
+
+ // Multiply the index by the element size to compute the element offset.
+ const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
+
+ // Add the element offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, LocalOffset);
+ }
+ }
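+
+ // Illustrative worked example (not part of the original change), assuming
+ // 4-byte i32 and 64-bit pointers: for
+ //   getelementptr inbounds {i32, [10 x i32]}, {i32, [10 x i32]}* %s,
+ //                 i64 0, i32 1, i64 %i
+ // the loop accumulates TotalOffset = 0*44 + 4 + 4*(sext %i), so the result
+ // is the SCEV ((4 + (4 * (sext %i))) + %s) with NSW set.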
+
+ // Add the total offset from all the GEP indices to the base.
+ return getAddExpr(BaseExpr, TotalOffset, Wrap);
+}
+
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
const SCEV *RHS) {
SmallVector<const SCEV *, 2> Ops;
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
- // If we have DataLayout, we can bypass creating a target-independent
+ // We can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (DL)
- return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
-
- Constant *C = ConstantExpr::getSizeOf(AllocTy);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
- C = Folded;
- Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
- assert(Ty == IntTy && "Effective SCEV type doesn't match");
- return getTruncateOrZeroExtend(getSCEV(C), Ty);
+ return getConstant(IntTy,
+ F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
}
const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
StructType *STy,
unsigned FieldNo) {
- // If we have DataLayout, we can bypass creating a target-independent
+ // We can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (DL) {
- return getConstant(IntTy,
- DL->getStructLayout(STy)->getElementOffset(FieldNo));
- }
-
- Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
- C = Folded;
-
- Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
- return getTruncateOrZeroExtend(getSCEV(C), Ty);
+ return getConstant(
+ IntTy,
+ F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
+ FieldNo));
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
-
- // If we have a DataLayout, use it!
- if (DL)
- return DL->getTypeSizeInBits(Ty);
-
- // Integer types have fixed sizes.
- if (Ty->isIntegerTy())
- return Ty->getPrimitiveSizeInBits();
-
- // The only other support type is pointer. Without DataLayout, conservatively
- // assume pointers are 64-bit.
- assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
- return 64;
+ return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
}
/// getEffectiveSCEVType - Return a type with the same bitwidth as
// The only other supported type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-
- if (DL)
- return DL->getIntPtrType(Ty);
-
- // Without DataLayout, conservatively assume pointers are 64-bit.
- return Type::getInt64Ty(getContext());
+ return F->getParent()->getDataLayout().getIntPtrType(Ty);
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
+ const SCEV *S = getExistingSCEV(V);
+ if (S == nullptr) {
+ S = createSCEV(V);
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+ }
+ return S;
+}
+
+const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
+ assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
+
ValueExprMapType::iterator I = ValueExprMap.find_as(V);
if (I != ValueExprMap.end()) {
const SCEV *S = I->second;
if (checkValidity(S))
return S;
- else
- ValueExprMap.erase(I);
+ ValueExprMap.erase(I);
}
- const SCEV *S = createSCEV(V);
-
- // The process of creating a SCEV for V may have caused other SCEVs
- // to have been created, so it's necessary to insert the new entry
- // from scratch, rather than trying to remember the insert position
- // above.
- ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
- return S;
+ return nullptr;
}
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
// If the increment doesn't overflow, then neither the addrec nor
// the post-increment will overflow.
if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
- if (OBO->hasNoUnsignedWrap())
- Flags = setFlags(Flags, SCEV::FlagNUW);
- if (OBO->hasNoSignedWrap())
- Flags = setFlags(Flags, SCEV::FlagNSW);
+ if (OBO->getOperand(0) == PN) {
+ if (OBO->hasNoUnsignedWrap())
+ Flags = setFlags(Flags, SCEV::FlagNUW);
+ if (OBO->hasNoSignedWrap())
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+ }
} else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
// If the increment is an inbounds GEP, then we know the address
// space cannot be wrapped around. We cannot make any guarantee
// about unsigned wrap because we may have a negative index from the base
// pointer. We can guarantee that no unsigned wrap occurs if the
// indices form a positive value.
- if (GEP->isInBounds()) {
+ if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
Flags = setFlags(Flags, SCEV::FlagNW);
const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, DL, TLI, DT, AC))
+ if (Value *V =
+ SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
/// operations. This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
- Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!Base->getType()->getPointerElementType()->isSized())
return getUnknown(GEP);
- // Don't blindly transfer the inbounds flag from the GEP instruction to the
- // Add expression, because the Instruction may be guarded by control flow
- // and the no-overflow bits may not be valid for the expression in any
- // context.
- SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
-
- const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
- E = GEP->op_end();
- I != E; ++I) {
- Value *Index = *I;
- // Compute the (potentially symbolic) offset in bytes for this index.
- if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
- // For a struct, add the member offset.
- unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
-
- // Add the field offset to the running total offset.
- TotalOffset = getAddExpr(TotalOffset, FieldOffset);
- } else {
- // For an array, add the element offset, explicitly scaled.
- const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
- const SCEV *IndexS = getSCEV(Index);
- // Getelementptr indices are signed.
- IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
-
- // Multiply the index by the element size to compute the element offset.
- const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);
-
- // Add the element offset to the running total offset.
- TotalOffset = getAddExpr(TotalOffset, LocalOffset);
- }
- }
-
- // Get the SCEV for the GEP base.
- const SCEV *BaseS = getSCEV(Base);
-
- // Add the total offset from all the GEP indices to the base.
- return getAddExpr(BaseS, TotalOffset, Wrap);
+ SmallVector<const SCEV *, 4> IndexExprs;
+ for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
+ IndexExprs.push_back(getSCEV(*Index));
+ return getGEPExpr(GEP->getSourceElementType(), getSCEV(Base), IndexExprs,
+ GEP->isInBounds());
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
+ computeKnownBits(U->getValue(), Zeros, Ones,
+ F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
return Zeros.countTrailingOnes();
}
// Split here to avoid paying the compile-time cost of calling both
// computeKnownBits and ComputeNumSignBits. This restriction can be lifted
// if needed.
-
+ const DataLayout &DL = F->getParent()->getDataLayout();
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
} else {
assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
"generalize as needed!");
- if (U->getValue()->getType()->isIntegerTy() || DL) {
- unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
- if (NS > 1)
- ConservativeResult = ConservativeResult.intersectWith(ConstantRange(
- APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
- APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
- }
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
+ if (NS > 1)
+ ConservativeResult = ConservativeResult.intersectWith(
+ ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
+ APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
return setRange(U, SignHint, ConservativeResult);
return setRange(S, SignHint, ConservativeResult);
}
-/// createSCEV - We know that there is no SCEV for the specified value.
-/// Analyze the expression.
+SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
+ const BinaryOperator *BinOp = cast<BinaryOperator>(V);
+
+ // Return early if there are no flags to propagate to the SCEV.
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+ if (BinOp->hasNoUnsignedWrap())
+ Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
+ if (BinOp->hasNoSignedWrap())
+ Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
+ if (Flags == SCEV::FlagAnyWrap) {
+ return SCEV::FlagAnyWrap;
+ }
+
+ // Here we check that BinOp is in the header of the innermost loop
+ // containing BinOp, since we only deal with instructions in the loop
+ // header. The actual loop we need to check later will come from an add
+ // recurrence, but getting that requires computing the SCEV of the operands,
+ // which can be expensive. This check we can do cheaply to rule out some
+ // cases early.
+ Loop *innermostContainingLoop = LI->getLoopFor(BinOp->getParent());
+ if (innermostContainingLoop == nullptr ||
+ innermostContainingLoop->getHeader() != BinOp->getParent())
+ return SCEV::FlagAnyWrap;
+
+ // Only proceed if we can prove that BinOp does not yield poison.
+ if (!isKnownNotFullPoison(BinOp)) return SCEV::FlagAnyWrap;
+
+ // At this point we know that if V is executed, then it does not wrap
+ // according to at least one of NSW or NUW. If V is not executed, then we do
+ // not know if the calculation that V represents would wrap. Multiple
+ // instructions can map to the same SCEV. If we apply NSW or NUW from V to
+ // the SCEV, we must guarantee no wrapping for that SCEV also when it is
+ // derived from other instructions that map to the same SCEV. We cannot make
+ // that guarantee for cases where V is not executed. So we need to find the
+ // loop that V is considered in relation to and prove that V is executed for
+ // every iteration of that loop. That implies that the value that V
+ // calculates does not wrap anywhere in the loop, so then we can apply the
+ // flags to the SCEV.
+ //
+ // We check isLoopInvariant to disambiguate in case we are adding two
+ // recurrences from different loops, so that we know which loop to prove
+ // that V is executed in.
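+ //
+ // Sketch of the hazard (illustrative, not from the original patch): if
+ //   %t1 = add nsw i32 %a, %b ; executed only when some condition holds
+ //   %t2 = add i32 %a, %b ; executed on every iteration
+ // then %t1 and %t2 map to the same SCEV (%a + %b), and transferring the
+ // nsw bit from %t1 would wrongly claim no signed wrap for %t2 as well.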
+ for (int OpIndex = 0; OpIndex < 2; ++OpIndex) {
+ const SCEV *Op = getSCEV(BinOp->getOperand(OpIndex));
+ if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
+ const int OtherOpIndex = 1 - OpIndex;
+ const SCEV *OtherOp = getSCEV(BinOp->getOperand(OtherOpIndex));
+ if (isLoopInvariant(OtherOp, AddRec->getLoop()) &&
+ isGuaranteedToExecuteForEveryIteration(BinOp, AddRec->getLoop()))
+ return Flags;
+ }
+ }
+ return SCEV::FlagAnyWrap;
+}
+
+/// createSCEV - We know that there is no SCEV for the specified value. Analyze
+/// the expression.
///
const SCEV *ScalarEvolution::createSCEV(Value *V) {
if (!isSCEVable(V->getType()))
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
//
- // Don't apply this instruction's NSW or NUW flags to the new
- // expression. The instruction may be guarded by control flow that the
- // no-wrap behavior depends on. Non-control-equivalent instructions can be
- // mapped to the same SCEV expression, and it would be incorrect to transfer
- // NSW/NUW semantics to those operations.
+ // FIXME: Expand this handling of NSW and NUW to other instructions, like
+ // sub and mul.
SmallVector<const SCEV *, 4> AddOps;
- AddOps.push_back(getSCEV(U->getOperand(1)));
- for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
- unsigned Opcode = Op->getValueID() - Value::InstructionVal;
- if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+ for (Value *Op = U;; Op = U->getOperand(0)) {
+ U = dyn_cast<Operator>(Op);
+ unsigned Opcode = U ? U->getOpcode() : 0;
+ if (!U || (Opcode != Instruction::Add && Opcode != Instruction::Sub)) {
+ assert(Op != V && "V should be an add");
+ AddOps.push_back(getSCEV(Op));
break;
- U = cast<Operator>(Op);
+ }
+
+ if (auto *OpSCEV = getExistingSCEV(Op)) {
+ AddOps.push_back(OpSCEV);
+ break;
+ }
+
+ // If a NUW or NSW flag can be applied to the SCEV for this
+ // addition, then compute the SCEV for this addition by itself
+ // with a separate call to getAddExpr. We need to do that
+ // instead of pushing the operands of the addition onto AddOps,
+ // since the flags are only known to apply to this particular
+ // addition - they may not apply to other additions that can be
+ // formed with operands from AddOps.
+ //
+ // FIXME: Expand this to sub instructions.
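+ //
+ // For example (illustrative): when visiting %y in
+ //   %x = add nsw i32 %a, %b
+ //   %y = add i32 %x, %c
+ // we push getAddExpr(%a, %b, nsw) as a single operand of the outer add
+ // instead of flattening to getAddExpr(%a, %b, %c), where nsw would not be
+ // justified for every grouping of the operands.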
+ if (Opcode == Instruction::Add && isa<BinaryOperator>(U)) {
+ SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(U);
+ if (Flags != SCEV::FlagAnyWrap) {
+ AddOps.push_back(getAddExpr(getSCEV(U->getOperand(0)),
+ getSCEV(U->getOperand(1)), Flags));
+ break;
+ }
+ }
+
const SCEV *Op1 = getSCEV(U->getOperand(1));
if (Opcode == Instruction::Sub)
AddOps.push_back(getNegativeSCEV(Op1));
else
AddOps.push_back(Op1);
}
- AddOps.push_back(getSCEV(U->getOperand(0)));
return getAddExpr(AddOps);
}
+
case Instruction::Mul: {
- // Don't transfer NSW/NUW for the same reason as AddExpr.
+ // FIXME: Transfer NSW/NUW as in AddExpr.
SmallVector<const SCEV *, 4> MulOps;
MulOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0);
unsigned TZ = A.countTrailingZeros();
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, 0, AC,
- nullptr, DT);
+ computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
+ F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
}
/// getExact - Get the exact loop backedge taken count considering all loop
-/// exits. A computable result can only be return for loops with a single exit.
-/// Returning the minimum taken count among all exits is incorrect because one
-/// of the loop's exit limit's may have been skipped. HowFarToZero assumes that
-/// the limit of each loop test is never skipped. This is a valid assumption as
-/// long as the loop exits via that test. For precise results, it is the
-/// caller's responsibility to specify the relevant loop exit using
+/// exits. A computable result can only be returned for loops with a single
+/// exit. Returning the minimum taken count among all exits is incorrect
+/// because one of the loop's exit limits may have been skipped. HowFarToZero
+/// assumes that the limit of each loop test is never skipped. This is a valid
+/// assumption as long as the loop exits via that test. For precise results, it
+/// is the caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
if (!Pred)
return getCouldNotCompute();
TerminatorInst *PredTerm = Pred->getTerminator();
- for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
- BasicBlock *PredSucc = PredTerm->getSuccessor(i);
+ for (const BasicBlock *PredSucc : PredTerm->successors()) {
if (PredSucc == BB)
continue;
// If the predecessor has a successor that isn't BB and isn't
if (!L->contains(I)) return false;
if (isa<PHINode>(I)) {
- if (L->getHeader() == I->getParent())
- return true;
- else
- // We don't currently keep track of the control flow needed to evaluate
- // PHIs, so we cannot handle PHIs inside of loops.
- return false;
+ // We don't currently keep track of the control flow needed to evaluate
+ // PHIs, so we cannot handle PHIs inside of loops.
+ return L->getHeader() == I->getParent();
}
// If we won't be able to constant fold this expression even if the operands
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
unsigned NumIterations = BEs.getZExtValue(); // must be in range
unsigned IterationNum = 0;
+ const DataLayout &DL = F->getParent()->getDataLayout();
for (; ; ++IterationNum) {
if (IterationNum == NumIterations)
return RetVal = CurrentIterVals[PN]; // Got exit value!
// Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
- Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
- TLI);
+ Constant *NextPHI =
+ EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
if (!NextPHI)
return nullptr; // Couldn't evaluate!
NextIterVals[PN] = NextPHI;
// Okay, we found a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
// "ExitWhen".
-
unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
+ const DataLayout &DL = F->getParent()->getDataLayout();
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
- ConstantInt *CondVal =
- dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
- DL, TLI));
+ ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
+ EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
if (PTy->getElementType()->isStructTy())
C2 = ConstantExpr::getIntegerCast(
C2, Type::getInt32Ty(C->getContext()), true);
- C = ConstantExpr::getGetElementPtr(C, C2);
+ C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
} else
C = ConstantExpr::getAdd(C, C2);
}
// Check to see if getSCEVAtScope actually made an improvement.
if (MadeImprovement) {
Constant *C = nullptr;
+ const DataLayout &DL = F->getParent()->getDataLayout();
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], DL,
- TLI);
+ C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
+ Operands[1], DL, TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
- C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- Operands, DL, TLI);
+ C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
+ DL, TLI);
if (!C) return V;
return getSCEV(C);
}
return isKnownPredicateWithRanges(Pred, LHS, RHS);
}
+bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred,
+ bool &Increasing) {
+ bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);
+
+#ifndef NDEBUG
+ // Verify an invariant: inverting the predicate should turn a monotonically
+ // increasing change to a monotonically decreasing one, and vice versa.
+ bool IncreasingSwapped;
+ bool ResultSwapped = isMonotonicPredicateImpl(
+ LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);
+
+ assert(Result == ResultSwapped && "should be able to analyze both!");
+ if (ResultSwapped)
+ assert(Increasing == !IncreasingSwapped &&
+ "monotonicity should flip as we flip the predicate");
+#endif
+
+ return Result;
+}
+
+bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred,
+ bool &Increasing) {
+
+ // A zero step value for LHS means the induction variable is essentially a
+ // loop invariant value. We don't really depend on the predicate actually
+ // flipping from false to true (for increasing predicates, and the other way
+ // around for decreasing predicates), all we care about is that *if* the
+ // predicate changes then it only changes from false to true.
+ //
+ // A zero step value in itself is not very useful, but there may be places
+ // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
+ // as general as possible.
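+ //
+ // E.g. (illustrative): if only Step >= 0 is provable for {n,+,Step}<nsw>,
+ // "{n,+,Step} sge m" still only changes from false to true; when Step is
+ // zero the predicate simply never changes at all.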
+
+ switch (Pred) {
+ default:
+ return false; // Conservative answer
+
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE:
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE:
+ if (!LHS->getNoWrapFlags(SCEV::FlagNUW))
+ return false;
+
+ Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
+ return true;
+
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE:
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLE: {
+ if (!LHS->getNoWrapFlags(SCEV::FlagNSW))
+ return false;
+
+ const SCEV *Step = LHS->getStepRecurrence(*this);
+
+ if (isKnownNonNegative(Step)) {
+ Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
+ return true;
+ }
+
+ if (isKnownNonPositive(Step)) {
+ Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
+ return true;
+ }
+
+ return false;
+ }
+
+ }
+
+ llvm_unreachable("switch has default clause!");
+}
+
+bool ScalarEvolution::isLoopInvariantPredicate(
+ ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
+ ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
+ const SCEV *&InvariantRHS) {
+
+ // If there is a loop-invariant operand, force it into the RHS, otherwise bail out.
+ if (!isLoopInvariant(RHS, L)) {
+ if (!isLoopInvariant(LHS, L))
+ return false;
+
+ std::swap(LHS, RHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ }
+
+ const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
+ if (!ArLHS || ArLHS->getLoop() != L)
+ return false;
+
+ bool Increasing;
+ if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
+ return false;
+
+ // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
+ // true as the loop iterates, and the backedge is control dependent on
+ // "ArLHS `Pred` RHS" == true then we can reason as follows:
+ //
+ // * if the predicate was false in the first iteration then the predicate
+ // is never evaluated again, since the loop exits without taking the
+ // backedge.
+ // * if the predicate was true in the first iteration then it will
+ // continue to be true for all future iterations since it is
+ // monotonically increasing.
+ //
+ // For both the above possibilities, we can replace the loop varying
+ // predicate with its value on the first iteration of the loop (which is
+ // loop invariant).
+ //
+ // A similar reasoning applies for a monotonically decreasing predicate, by
+ // replacing true with false and false with true in the above two bullets.
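+ //
+ // Illustrative example (not from the original patch): for
+ // ArLHS = {x,+,1}<nsw> and a loop-invariant n, if the backedge is taken
+ // only while "ArLHS sge n" holds, the predicate is monotonically
+ // increasing and can be replaced by its first-iteration value "x sge n".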
+
+ auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
+
+ if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
+ return false;
+
+ InvariantPred = Pred;
+ InvariantLHS = ArLHS->getStart();
+ InvariantRHS = RHS;
+ return true;
+}
+
bool
ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS) {
return true;
}
+ struct ClearWalkingBEDominatingCondsOnExit {
+ ScalarEvolution &SE;
+
+ explicit ClearWalkingBEDominatingCondsOnExit(ScalarEvolution &SE)
+ : SE(SE) {}
+
+ ~ClearWalkingBEDominatingCondsOnExit() {
+ SE.WalkingBEDominatingConds = false;
+ }
+ };
+
+ // We don't want more than one activation of the following loop on the stack
+ // -- that can lead to O(n!) time complexity.
+ if (WalkingBEDominatingConds)
+ return false;
+
+ WalkingBEDominatingConds = true;
+ ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
+
+ // If the loop is not reachable from the entry block, we risk running into an
+ // infinite loop as we walk up into the dom tree. These loops do not matter
+ // anyway, so we just return a conservative answer when we see them.
+ if (!DT->isReachableFromEntry(L->getHeader()))
+ return false;
+
+ for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()];
+ DTN != HeaderDTN;
+ DTN = DTN->getIDom()) {
+
+ assert(DTN && "should reach the loop header before reaching the root!");
+
+ BasicBlock *BB = DTN->getBlock();
+ BasicBlock *PBB = BB->getSinglePredecessor();
+ if (!PBB)
+ continue;
+
+ BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
+ if (!ContinuePredicate || !ContinuePredicate->isConditional())
+ continue;
+
+ Value *Condition = ContinuePredicate->getCondition();
+
+ // If we have an edge `E` within the loop body that dominates the only
+ // latch, the condition guarding `E` also guards the backedge. This
+ // reasoning works only for loops with a single latch.
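+ //
+ // Illustration (not from the original patch): in
+ //   PBB --cond--> BB -> ... -> latch
+ // where PBB is BB's sole predecessor and BB dominates the latch, the
+ // backedge can only be reached when the branch in PBB took the edge to
+ // BB, so that branch condition must hold whenever the backedge is taken.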
+
+ BasicBlockEdge DominatingEdge(PBB, BB);
+ if (DominatingEdge.isSingleEdge()) {
+ // We're constructively (and conservatively) enumerating edges within the
+ // loop body that dominate the latch. The dominator tree better agree
+ // with us on this:
+ assert(DT->dominates(DominatingEdge, Latch) && "should be!");
+
+ if (isImpliedCond(Pred, LHS, RHS, Condition,
+ BB != ContinuePredicate->getSuccessor(0)))
+ return true;
+ }
+ }
+
return false;
}
ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
if (!ICI) return false;
- // Bail if the ICmp's operands' types are wider than the needed type
- // before attempting to call getSCEV on them. This avoids infinite
- // recursion, since the analysis of widening casts can require loop
- // exit condition information for overflow checking, which would
- // lead back here.
- if (getTypeSizeInBits(LHS->getType()) <
- getTypeSizeInBits(ICI->getOperand(0)->getType()))
- return false;
-
// We have now found a conditional branch that dominates the loop or controls
// the loop latch. Check to see if it is the comparison we are looking for.
ICmpInst::Predicate FoundPred;
const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
- // Balance the types. The case where FoundLHS' type is wider than
- // LHS' type is checked for above.
- if (getTypeSizeInBits(LHS->getType()) >
+ // Balance the types.
+ if (getTypeSizeInBits(LHS->getType()) <
+ getTypeSizeInBits(FoundLHS->getType())) {
+ if (CmpInst::isSigned(Pred)) {
+ LHS = getSignExtendExpr(LHS, FoundLHS->getType());
+ RHS = getSignExtendExpr(RHS, FoundLHS->getType());
+ } else {
+ LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
+ RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
+ }
+ } else if (getTypeSizeInBits(LHS->getType()) >
getTypeSizeInBits(FoundLHS->getType())) {
if (CmpInst::isSigned(FoundPred)) {
FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
const SCEV *LHS, const SCEV *RHS,
const SCEV *FoundLHS,
const SCEV *FoundRHS) {
+ if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
+ return true;
+
return isImpliedCondOperandsHelper(Pred, LHS, RHS,
FoundLHS, FoundRHS) ||
// ~x < ~y --> x > y
return false;
}
+/// isImpliedCondOperandsViaRanges - helper function for isImpliedCondOperands.
+/// Tries to prove cases like "X `sgt` 0 => X - 1 `sgt` -1".
+bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS,
+ const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS) {
+ if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
+ // The restriction on `FoundRHS` can be lifted easily -- it exists only to
+ // reduce the compile time impact of this optimization.
+ return false;
+
+ const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS);
+ if (!AddLHS || AddLHS->getOperand(1) != FoundLHS ||
+ !isa<SCEVConstant>(AddLHS->getOperand(0)))
+ return false;
+
+ APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getValue()->getValue();
+
+ // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
+ // antecedent "`FoundLHS` `Pred` `FoundRHS`".
+ ConstantRange FoundLHSRange =
+ ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
+
+ // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range
+ // for `LHS`:
+ APInt Addend =
+ cast<SCEVConstant>(AddLHS->getOperand(0))->getValue()->getValue();
+ ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend));
+
+ // We can also compute the range of values for `LHS` that satisfy the
+ // consequent, "`LHS` `Pred` `RHS`":
+ APInt ConstRHS = cast<SCEVConstant>(RHS)->getValue()->getValue();
+ ConstantRange SatisfyingLHSRange =
+ ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
+
+ // The antecedent implies the consequent if every value of `LHS` that
+ // satisfies the antecedent also satisfies the consequent.
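+ //
+ // Worked example (illustrative): proving "X sgt 0 => X - 1 sgt -1" gives
+ // FoundLHSRange = [1, INT_MAX], hence LHSRange = [0, INT_MAX - 1], which
+ // is contained in SatisfyingLHSRange = [0, INT_MAX], so the implication
+ // holds.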
+ return SatisfyingLHSRange.contains(LHSRange);
+}
+
// Verify if a linear IV with positive stride can overflow when in a
// less-than comparison, knowing the invariant term of the comparison, the
// stride and the knowledge of NSW/NUW flags on the recurrence.
}
/// Find parametric terms in the given SCEV expression.
-void SCEVAddRecExpr::collectParametricTerms(
- ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Terms) const {
+void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Terms) {
SmallVector<const SCEV *, 4> Strides;
- SCEVCollectStrides StrideCollector(SE, Strides);
- visitAll(this, StrideCollector);
+ SCEVCollectStrides StrideCollector(*this, Strides);
+ visitAll(Expr, StrideCollector);
DEBUG({
dbgs() << "Strides:\n";
/// Third step of delinearization: compute the access functions for the
/// Subscripts based on the dimensions in Sizes.
-void SCEVAddRecExpr::computeAccessFunctions(
- ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Subscripts,
- SmallVectorImpl<const SCEV *> &Sizes) const {
+void ScalarEvolution::computeAccessFunctions(
+ const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes) {
// Early exit in case this SCEV is not an affine multivariate function.
- if (Sizes.empty() || !this->isAffine())
+ if (Sizes.empty())
return;
- const SCEV *Res = this;
+ if (auto AR = dyn_cast<SCEVAddRecExpr>(Expr))
+ if (!AR->isAffine())
+ return;
+
+ const SCEV *Res = Expr;
int Last = Sizes.size() - 1;
for (int i = Last; i >= 0; i--) {
const SCEV *Q, *R;
- SCEVDivision::divide(SE, Res, Sizes[i], &Q, &R);
+ SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
DEBUG({
dbgs() << "Res: " << *Res << "\n";
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling delinearize on that and printing the results.
-void SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
+void ScalarEvolution::delinearize(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const {
+ const SCEV *ElementSize) {
// First step: collect parametric terms.
SmallVector<const SCEV *, 4> Terms;
- collectParametricTerms(SE, Terms);
+ collectParametricTerms(Expr, Terms);
if (Terms.empty())
return;
// Second step: find subscript sizes.
- SE.findArrayDimensions(Terms, Sizes, ElementSize);
+ findArrayDimensions(Terms, Sizes, ElementSize);
if (Sizes.empty())
return;
// Third step: compute the access functions for each subscript.
- computeAccessFunctions(SE, Subscripts, Sizes);
+ computeAccessFunctions(Expr, Subscripts, Sizes);
if (Subscripts.empty())
return;
DEBUG({
- dbgs() << "succeeded to delinearize " << *this << "\n";
+ dbgs() << "succeeded to delinearize " << *Expr << "\n";
dbgs() << "ArrayDecl[UnknownSize]";
for (const SCEV *S : Sizes)
dbgs() << "[" << *S << "]";
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64),
- BlockDispositions(64), FirstUnknown(nullptr) {
+ : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64),
+ LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) {
initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}
this->F = &F;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
}
assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
+ assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
- AU.addRequired<AssumptionCacheTracker>();
+ AU.addRequiredTransitive<AssumptionCacheTracker>();
AU.addRequiredTransitive<LoopInfoWrapperPass>();
AU.addRequiredTransitive<DominatorTreeWrapperPass>();
- AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {