"derived loop"),
cl::init(100));
-static RegisterPass<ScalarEvolution>
-R("scalar-evolution", "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
- LoopInfo *LI;
+ const LoopInfo *LI;
public:
- explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}
+ explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
bool operator()(const SCEV *LHS, const SCEV *RHS) const {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
return false;
// Primarily, sort the SCEVs by their getSCEVType().
- if (LHS->getSCEVType() != RHS->getSCEVType())
- return LHS->getSCEVType() < RHS->getSCEVType();
+ unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
+ if (LType != RType)
+ return LType < RType;
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
// Order pointer values after integer values. This helps SCEVExpander
// form GEPs.
- if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
- return false;
- if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
- return true;
+ bool LIsPointer = LU->getType()->isPointerTy(),
+ RIsPointer = RU->getType()->isPointerTy();
+ if (LIsPointer != RIsPointer)
+ return RIsPointer;
// Compare getValueID values.
- if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
- return LU->getValue()->getValueID() < RU->getValue()->getValueID();
+ unsigned LID = LU->getValue()->getValueID(),
+ RID = RU->getValue()->getValueID();
+ if (LID != RID)
+ return LID < RID;
// Sort arguments by their position.
if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
// For instructions, compare their loop depth, and their opcode.
// This is pretty loose.
- if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
- Instruction *RV = cast<Instruction>(RU->getValue());
+ if (const Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
+ const Instruction *RV = cast<Instruction>(RU->getValue());
// Compare loop depths.
- if (LI->getLoopDepth(LV->getParent()) !=
- LI->getLoopDepth(RV->getParent()))
- return LI->getLoopDepth(LV->getParent()) <
- LI->getLoopDepth(RV->getParent());
-
- // Compare opcodes.
- if (LV->getOpcode() != RV->getOpcode())
- return LV->getOpcode() < RV->getOpcode();
+ unsigned LDepth = LI->getLoopDepth(LV->getParent()),
+ RDepth = LI->getLoopDepth(RV->getParent());
+ if (LDepth != RDepth)
+ return LDepth < RDepth;
// Compare the number of operands.
- if (LV->getNumOperands() != RV->getNumOperands())
- return LV->getNumOperands() < RV->getNumOperands();
+ unsigned LNumOps = LV->getNumOperands(),
+ RNumOps = RV->getNumOperands();
+ if (LNumOps != RNumOps)
+ return LNumOps < RNumOps;
}
return false;
// Compare constant values.
if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
const SCEVConstant *RC = cast<SCEVConstant>(RHS);
- if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
- return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
- return LC->getValue()->getValue().ult(RC->getValue()->getValue());
+ const ConstantInt *LCC = LC->getValue();
+ const ConstantInt *RCC = RC->getValue();
+ unsigned LBitWidth = LCC->getBitWidth(), RBitWidth = RCC->getBitWidth();
+ if (LBitWidth != RBitWidth)
+ return LBitWidth < RBitWidth;
+ return LCC->getValue().ult(RCC->getValue());
}
// Compare addrec loop depths.
if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
- if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
- return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
+ unsigned LDepth = LA->getLoop()->getLoopDepth(),
+ RDepth = RA->getLoop()->getLoopDepth();
+ if (LDepth != RDepth)
+ return LDepth < RDepth;
}
// Lexicographically compare n-ary expressions.
if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
- for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
- if (i >= RC->getNumOperands())
+ unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ if (i >= RNumOps)
return false;
- if (operator()(LC->getOperand(i), RC->getOperand(i)))
+ const SCEV *LOp = LC->getOperand(i), *ROp = RC->getOperand(i);
+ if (operator()(LOp, ROp))
return true;
- if (operator()(RC->getOperand(i), LC->getOperand(i)))
+ if (operator()(ROp, LOp))
return false;
}
- return LC->getNumOperands() < RC->getNumOperands();
+ return LNumOps < RNumOps;
}
// Lexicographically compare udiv expressions.
if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
- if (operator()(LC->getLHS(), RC->getLHS()))
+ const SCEV *LL = LC->getLHS(), *LR = LC->getRHS(),
+ *RL = RC->getLHS(), *RR = RC->getRHS();
+ if (operator()(LL, RL))
return true;
- if (operator()(RC->getLHS(), LC->getLHS()))
+ if (operator()(RL, LL))
return false;
- if (operator()(LC->getRHS(), RC->getRHS()))
+ if (operator()(LR, RR))
return true;
- if (operator()(RC->getRHS(), LC->getRHS()))
+ if (operator()(RR, LR))
return false;
return false;
}
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// trunc(trunc(x)) --> trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
return getAddRecExpr(Operands, AddRec->getLoop());
}
- // The cast wasn't folded; create an explicit cast node.
- // Recompute the insert position, as it may have been invalidated.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // As a special case, fold trunc(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
+ // The cast wasn't folded; create an explicit cast node. We can reuse
+ // the existing insert position since if we get here, we won't have
+ // made any changes which would invalidate it.
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
return getAddRecExpr(Ops, AR->getLoop());
}
+ // As a special case, fold anyext(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
// If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
return SExt;
AddRec->op_end());
AddRecOps[0] = getAddExpr(LIOps);
- // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
- // is not associative so this isn't necessarily safe.
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer add and the inner addrec are guaranteed to have no overflow.
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
+ HasNUW && AddRec->hasNoUnsignedWrap(),
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- // It's tempting to propagate the NSW flag here, but nsw multiplication
- // is not associative so this isn't necessarily safe.
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer mul and the inner addrec are guaranteed to have no overflow.
const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
HasNUW && AddRec->hasNoUnsignedWrap(),
- /*HasNSW=*/false);
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
const SCEV *RHS) {
+ // Fast path: X - X --> 0.
+ if (LHS == RHS)
+ return getConstant(LHS->getType(), 0);
+
// X - Y --> X + -Y
return getAddExpr(LHS, getNegativeSCEV(RHS));
}
// Push the def-use children onto the Worklist stack.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
- Worklist.push_back(cast<Instruction>(UI));
+ Worklist.push_back(cast<Instruction>(*UI));
}
/// ForgetSymbolicValue - This looks up computed SCEV values for all
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
- bool InBounds = GEP->isInBounds();
+ // Don't blindly transfer the inbounds flag from the GEP instruction to the
+ // Add expression, because the Instruction may be guarded by control flow
+ // and the no-overflow bits may not be valid for the expression in any
+ // context.
+
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- TotalOffset = getAddExpr(TotalOffset,
- getOffsetOfExpr(STy, FieldNo),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+
+ // Add the field offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, FieldOffset);
} else {
// For an array, add the element offset, explicitly scaled.
- const SCEV *LocalOffset = getSCEV(Index);
+ const SCEV *ElementSize = getSizeOfExpr(*GTI);
+ const SCEV *IndexS = getSCEV(Index);
// Getelementptr indices are signed.
- LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- // Lower "inbounds" GEPs to NSW arithmetic.
- LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
- TotalOffset = getAddExpr(TotalOffset, LocalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
+
+ // Multiply the index by the element size to compute the element offset.
+ const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+
+ // Add the element offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
}
- return getAddExpr(getSCEV(Base), TotalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+
+ // Get the SCEV for the GEP base.
+ const SCEV *BaseS = getSCEV(Base);
+
+ // Add the total offset from all the GEP indices to the base.
+ return getAddExpr(BaseS, TotalOffset);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
if (!C->getValue()->isZero())
ConservativeResult =
- ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
+ ConservativeResult.intersectWith(
+ ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
// TODO: non-affine addrec
if (AddRec->isAffine()) {
Operator *U = cast<Operator>(V);
switch (Opcode) {
case Instruction::Add:
- // Don't transfer the NSW and NUW bits from the Add instruction to the
- // Add expression, because the Instruction may be guarded by control
- // flow and the no-overflow bits may not be valid for the expression in
- // any context.
return getAddExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
case Instruction::Mul:
- // Don't transfer the NSW and NUW bits from the Mul instruction to the
- // Mul expression, as with Add.
return getMulExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
case Instruction::UDiv:
/// changed a value in a way that may effect its value, or which may
/// disconnect it from a def-use chain linking it to a loop.
void ScalarEvolution::forgetValue(Value *V) {
+ // If there's a SCEVUnknown tying this value into the SCEV
+ // space, remove it from the folding set map. The SCEVUnknown
+ // object and any other SCEV objects which reference it
+ // (transitively) remain allocated, effectively leaked until
+ // the underlying BumpPtrAllocator is freed.
+ //
+ // This permits SCEV pointers to be used as keys in maps
+ // such as the ValuesAtScopes map.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scUnknown);
+ ID.AddPointer(V);
+ void *IP;
+ if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+ UniqueSCEVs.RemoveNode(S);
+
+ // This isn't necessary, but we might as well remove the
+ // value from the ValuesAtScopes map too.
+ ValuesAtScopes.erase(S);
+ }
+
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return;
// the arguments into constants, and if so, try to constant propagate the
// result. This is particularly useful for computing loop exit values.
if (CanConstantFold(I)) {
- std::vector<Constant*> Operands;
- Operands.reserve(I->getNumOperands());
+ SmallVector<Constant *, 4> Operands;
+ bool MadeImprovement = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Value *Op = I->getOperand(i);
if (Constant *C = dyn_cast<Constant>(Op)) {
Operands.push_back(C);
- } else {
- // If any of the operands is non-constant and if they are
- // non-integer and non-pointer, don't even try to analyze them
- // with scev techniques.
- if (!isSCEVable(Op->getType()))
- return V;
-
- const SCEV *OpV = getSCEVAtScope(Op, L);
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
- Constant *C = SC->getValue();
- if (C->getType() != Op->getType())
- C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
- if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
- if (C->getType() != Op->getType())
- C =
- ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else
- return V;
- } else {
- return V;
- }
+ continue;
}
+
+ // If any of the operands is non-constant and if they are
+ // non-integer and non-pointer, don't even try to analyze them
+ // with scev techniques.
+ if (!isSCEVable(Op->getType()))
+ return V;
+
+ const SCEV *OrigV = getSCEV(Op);
+ const SCEV *OpV = getSCEVAtScope(OrigV, L);
+ MadeImprovement |= OrigV != OpV;
+
+ Constant *C = 0;
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
+ C = SC->getValue();
+ if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
+ C = dyn_cast<Constant>(SU->getValue());
+ if (!C) return V;
+ if (C->getType() != Op->getType())
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ Op->getType(),
+ false),
+ C, Op->getType());
+ Operands.push_back(C);
}
- Constant *C = 0;
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD);
- else
- C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(), TD);
- if (C)
+ // Check to see if getSCEVAtScope actually made an improvement.
+ if (MadeImprovement) {
+ Constant *C = 0;
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ C = ConstantFoldCompareInstOperands(CI->getPredicate(),
+ Operands[0], Operands[1], TD);
+ else
+ C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+ &Operands[0], Operands.size(), TD);
+ if (!C) return V;
return getSCEV(C);
+ }
}
}
// If this is a loop recurrence for a loop that does not contain L, then we
// are dealing with the final value computed by the loop.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
- if (!L || !AddRec->getLoop()->contains(L)) {
+ // First, attempt to evaluate each operand.
+ // Avoid performing the look-up in the common case where the specified
+ // expression has no loop-variant portions.
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
+ const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
+ if (OpAtScope == AddRec->getOperand(i))
+ continue;
+
+ // Okay, at least one of these operands is loop variant but might be
+ // foldable. Build a new instance of the folded commutative expression.
+ SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
+ AddRec->op_begin()+i);
+ NewOps.push_back(OpAtScope);
+ for (++i; i != e; ++i)
+ NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
+
+ AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+ break;
+ }
+
+ // If the scope is outside the addrec's loop, evaluate it by using the
+ // loop exit value of the addrec.
+ if (!AddRec->getLoop()->contains(L)) {
// To evaluate this recurrence, we need to know how many times the AddRec
// loop iterates. Compute this now.
const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
// Then, evaluate the AddRec.
return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
}
+
return AddRec;
}
// this now dangles!
}
-void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
+void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
+ Value *Old = getValPtr();
+
+ // If there's a SCEVUnknown tying this value into the SCEV
+ // space, replace the SCEVUnknown's value with the new value
+ // for the benefit of any SCEVs still referencing it, and
+ // remove it from the folding set map so that new SCEVs
+ // don't reference it.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scUnknown);
+ ID.AddPointer(Old);
+ void *IP;
+ if (SCEVUnknown *S = cast_or_null<SCEVUnknown>(
+ SE->UniqueSCEVs.FindNodeOrInsertPos(ID, IP))) {
+ S->V = V;
+ SE->UniqueSCEVs.RemoveNode(S);
+ SE->ValuesAtScopes.erase(S);
+ }
+
// Forget all the expressions associated with users of the old value,
// so that future queries will recompute the expressions using the new
// value.
SmallVector<User *, 16> Worklist;
SmallPtrSet<User *, 8> Visited;
- Value *Old = getValPtr();
- bool DeleteOld = false;
for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
User *U = Worklist.pop_back_val();
// Deleting the Old value will cause this to dangle. Postpone
// that until everything else is done.
- if (U == Old) {
- DeleteOld = true;
+ if (U == Old)
continue;
- }
if (!Visited.insert(U))
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
UI != UE; ++UI)
Worklist.push_back(*UI);
}
- // Delete the Old value if it (indirectly) references itself.
- if (DeleteOld) {
- if (PHINode *PN = dyn_cast<PHINode>(Old))
- SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(Old);
- // this now dangles!
- }
- // this may dangle!
+ // Delete the Old value.
+ if (PHINode *PN = dyn_cast<PHINode>(Old))
+ SE->ConstantEvolutionLoopExitValue.erase(PN);
+ SE->Scalars.erase(Old);
+ // this now dangles!
}
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)