#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
errs() << '\n';
}
-void SCEV::print(std::ostream &o) const {
- raw_os_ostream OS(o);
- print(OS);
-}
-
bool SCEV::isZero() const {
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
return SC->getValue()->isZero();
OS << "}<" << L->getHeader()->getName() + ">";
}
+void SCEVFieldOffsetExpr::print(raw_ostream &OS) const {
+ // LLVM struct fields don't have names, so just print the field number.
+ OS << "offsetof(" << *STy << ", " << FieldNo << ")";
+}
+
+void SCEVAllocSizeExpr::print(raw_ostream &OS) const {
+ OS << "sizeof(" << *AllocTy << ")";
+}
+
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// All non-instruction values are loop invariant. All instructions are loop
// invariant if they are not contained in the specified loop.
// SCEV Utilities
//===----------------------------------------------------------------------===//
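+/// CompareTypes - Impose an arbitrary but deterministic ordering on LLVM
+/// types, returning true if A should sort before B. Types this function
+/// doesn't otherwise distinguish compare as equal.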
+static bool CompareTypes(const Type *A, const Type *B) {
+ if (A->getTypeID() != B->getTypeID())
+ return A->getTypeID() < B->getTypeID();
+ if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
+ const IntegerType *BI = cast<IntegerType>(B);
+ return AI->getBitWidth() < BI->getBitWidth();
+ }
+ if (const PointerType *AI = dyn_cast<PointerType>(A)) {
+ const PointerType *BI = cast<PointerType>(B);
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
+ const ArrayType *BI = cast<ArrayType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const VectorType *AI = dyn_cast<VectorType>(A)) {
+ const VectorType *BI = cast<VectorType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const StructType *AI = dyn_cast<StructType>(A)) {
+ const StructType *BI = cast<StructType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
+ if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
+ CompareTypes(BI->getElementType(i), AI->getElementType(i)))
+ return CompareTypes(AI->getElementType(i), BI->getElementType(i));
+ }
+ return false;
+}
+
namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}
bool operator()(const SCEV *LHS, const SCEV *RHS) const {
+ // Fast-path: SCEVs are uniqued so we can do a quick equality check.
+ if (LHS == RHS)
+ return false;
+
// Primarily, sort the SCEVs by their getSCEVType().
if (LHS->getSCEVType() != RHS->getSCEVType())
return LHS->getSCEVType() < RHS->getSCEVType();
return operator()(LC->getOperand(), RC->getOperand());
}
+ // Compare offsetof expressions.
+ if (const SCEVFieldOffsetExpr *LA = dyn_cast<SCEVFieldOffsetExpr>(LHS)) {
+ const SCEVFieldOffsetExpr *RA = cast<SCEVFieldOffsetExpr>(RHS);
+ if (CompareTypes(LA->getStructType(), RA->getStructType()) ||
+ CompareTypes(RA->getStructType(), LA->getStructType()))
+ return CompareTypes(LA->getStructType(), RA->getStructType());
+ return LA->getFieldNo() < RA->getFieldNo();
+ }
+
+ // Compare sizeof expressions by the allocation type.
+ if (const SCEVAllocSizeExpr *LA = dyn_cast<SCEVAllocSizeExpr>(LHS)) {
+ const SCEVAllocSizeExpr *RA = cast<SCEVAllocSizeExpr>(RHS);
+ return CompareTypes(LA->getAllocType(), RA->getAllocType());
+ }
+
llvm_unreachable("Unknown SCEV kind!");
return false;
}
MultiplyFactor = MultiplyFactor.trunc(W);
// Calculate the product, at width T+W
- const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
+ const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
+ CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
// If we have special knowledge that this addrec won't overflow,
// we don't need to do any further analysis.
- if (AR->hasNoUnsignedOverflow())
+ if (AR->hasNoUnsignedWrap())
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getZeroExtendExpr(Step, Ty),
L);
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(BitWidth * 2);
+ const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul =
getMulExpr(CastedMaxBECount,
// If we have special knowledge that this addrec won't overflow,
// we don't need to do any further analysis.
- if (AR->hasNoSignedOverflow())
+ if (AR->hasNoSignedWrap())
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
L);
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(BitWidth * 2);
+ const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul =
getMulExpr(CastedMaxBECount,
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ const Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
return S;
}
-/// getUDivExpr - Get a canonical multiply expression, or something simpler if
-/// possible.
+/// getUDivExpr - Get a canonical unsigned division expression, or something
+/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
const SCEV *RHS) {
assert(getEffectiveSCEVType(LHS->getType()) ==
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
if (RHSC->getValue()->equalsInt(1))
- return LHS; // X udiv 1 --> x
+ return LHS; // X udiv 1 --> x
if (RHSC->isZero())
return getIntegerSCEV(0, LHS->getType()); // value is undefined
if (!RHSC->getValue()->getValue().isPowerOf2())
++MaxShiftAmt;
const IntegerType *ExtTy =
- IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
+ IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
// {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
if (const SCEVConstant *Step =
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
Constant *LHSCV = LHSC->getValue();
Constant *RHSCV = RHSC->getValue();
- return getConstant(cast<ConstantInt>(getContext().getConstantExprUDiv(LHSCV,
+ return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
RHSCV)));
}
}
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
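+
+/// getFieldOffsetExpr - Return an expression for the byte offset of field
+/// FieldNo in struct type STy. With TargetData this folds to a constant;
+/// otherwise a symbolic offsetof expression is created.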
+const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
+ unsigned FieldNo) {
+ // If we have TargetData we can determine the constant offset.
+ if (TD) {
+ const Type *IntPtrTy = TD->getIntPtrType(getContext());
+ const StructLayout &SL = *TD->getStructLayout(STy);
+ uint64_t Offset = SL.getElementOffset(FieldNo);
+ return getIntegerSCEV(Offset, IntPtrTy);
+ }
+
+ // Field 0 is always at offset 0.
+ if (FieldNo == 0) {
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ return getIntegerSCEV(0, Ty);
+ }
+
+ // Okay, it looks like we really DO need an offsetof expr. Check to see if we
+ // already have one, otherwise create a new one.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scFieldOffset);
+ ID.AddPointer(STy);
+ ID.AddInteger(FieldNo);
+ void *IP = 0;
+ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ SCEV *S = SCEVAllocator.Allocate<SCEVFieldOffsetExpr>();
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ new (S) SCEVFieldOffsetExpr(ID, Ty, STy, FieldNo);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+}
+
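+/// getAllocSizeExpr - Return an expression for the allocation size of
+/// AllocTy. With TargetData this folds to a constant; without it, arrays
+/// and vectors are decomposed, e.g. sizeof([4 x i32]) becomes
+/// 4 * sizeof(i32), and other types get a symbolic sizeof expression.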
+const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
+ // If we have TargetData we can determine the constant size.
+ if (TD && AllocTy->isSized()) {
+ const Type *IntPtrTy = TD->getIntPtrType(getContext());
+ return getIntegerSCEV(TD->getTypeAllocSize(AllocTy), IntPtrTy);
+ }
+
+ // Expand an array size into the element size times the number
+ // of elements.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(AllocTy)) {
+ const SCEV *E = getAllocSizeExpr(ATy->getElementType());
+ return getMulExpr(
+ E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
+ ATy->getNumElements())));
+ }
+
+ // Expand a vector size into the element size times the number
+ // of elements.
+ if (const VectorType *VTy = dyn_cast<VectorType>(AllocTy)) {
+ const SCEV *E = getAllocSizeExpr(VTy->getElementType());
+ return getMulExpr(
+ E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
+ VTy->getNumElements())));
+ }
+
+ // Okay, it looks like we really DO need a sizeof expr. Check to see if we
+ // already have one, otherwise create a new one.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scAllocSize);
+ ID.AddPointer(AllocTy);
+ void *IP = 0;
+ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ SCEV *S = SCEVAllocator.Allocate<SCEVAllocSizeExpr>();
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ new (S) SCEVAllocSizeExpr(ID, Ty, AllocTy);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+}
+
const SCEV *ScalarEvolution::getUnknown(Value *V) {
// Don't attempt to do anything other than create a SCEVUnknown object
// here. createSCEV only calls getUnknown after checking for all other
/// also includes pointer types, which are analyzable even when no
/// target-specific pointer size information is available.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
- // Integers are always SCEVable.
- if (Ty->isInteger())
- return true;
-
- // Pointers are SCEVable if TargetData information is available
- // to provide pointer size information.
- if (isa<PointerType>(Ty))
- return TD != NULL;
-
- // Otherwise it's not SCEVable.
- return false;
+ // Integers and pointers are always SCEVable.
+ return Ty->isInteger() || isa<PointerType>(Ty);
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
if (TD)
return TD->getTypeSizeInBits(Ty);
- // Otherwise, we support only integer types.
- assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
- return Ty->getPrimitiveSizeInBits();
+ // Integer types have fixed sizes.
+ if (Ty->isInteger())
+ return Ty->getPrimitiveSizeInBits();
+
+  // The only other supported type is pointer. Without TargetData,
+  // conservatively assume pointers are 64-bit.
+ assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!");
+ return 64;
}
/// getEffectiveSCEVType - Return a type with the same bitwidth as
if (Ty->isInteger())
return Ty;
+  // The only other supported type is pointer.
assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
- return TD->getIntPtrType();
+ if (TD) return TD->getIntPtrType(getContext());
+
+ // Without TargetData, conservatively assume pointers are 64-bit.
+ return Type::getInt64Ty(getContext());
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
return getConstant(
- cast<ConstantInt>(getContext().getConstantExprNeg(VC->getValue())));
+ cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
const Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
return getMulExpr(V,
- getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty))));
+ getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}
/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
return getConstant(
- cast<ConstantInt>(getContext().getConstantExprNot(VC->getValue())));
+ cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
const Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
const SCEV *AllOnes =
- getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty)));
+ getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
return getMinusSCEV(AllOnes, V);
}
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or zero extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrZeroExtend cannot truncate!");
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or sign extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrSignExtend cannot truncate!");
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or any extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrAnyExtend cannot truncate!");
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or noop with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
"getTruncateOrNoop cannot extend!");
      // count information isn't going to change anything. In the latter
// case, createNodeForPHI will perform the necessary updates on its
// own when it gets to that point.
- if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
+ if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
+ ValuesAtScopes.erase(It->second);
Scalars.erase(It);
- ValuesAtScopes.erase(I);
+ }
}
PushDefUseChildren(I, Worklist);
getSCEV(OBO->getOperand(1)) ==
PHISCEV->getStepRecurrence(*this)) {
const SCEVAddRecExpr *PostInc = PHISCEV->getPostIncExpr(*this);
- if (OBO->hasNoUnsignedOverflow()) {
+ if (OBO->hasNoUnsignedWrap()) {
const_cast<SCEVAddRecExpr *>(PHISCEV)
- ->setHasNoUnsignedOverflow(true);
+ ->setHasNoUnsignedWrap(true);
const_cast<SCEVAddRecExpr *>(PostInc)
- ->setHasNoUnsignedOverflow(true);
+ ->setHasNoUnsignedWrap(true);
}
- if (OBO->hasNoSignedOverflow()) {
+ if (OBO->hasNoSignedWrap()) {
const_cast<SCEVAddRecExpr *>(PHISCEV)
- ->setHasNoSignedOverflow(true);
+ ->setHasNoSignedWrap(true);
const_cast<SCEVAddRecExpr *>(PostInc)
- ->setHasNoSignedOverflow(true);
+ ->setHasNoSignedWrap(true);
}
}
///
const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
- const Type *IntPtrTy = TD->getIntPtrType();
+ const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
// Compute the (potentially symbolic) offset in bytes for this index.
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
- const StructLayout &SL = *TD->getStructLayout(STy);
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- uint64_t Offset = SL.getElementOffset(FieldNo);
- TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
+ TotalOffset = getAddExpr(TotalOffset,
+ getFieldOffsetExpr(STy, FieldNo));
} else {
// For an array, add the element offset, explicitly scaled.
const SCEV *LocalOffset = getSCEV(Index);
if (!isa<PointerType>(LocalOffset->getType()))
      // Getelementptr indices are signed.
LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- LocalOffset =
- getMulExpr(LocalOffset,
- getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
+ LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI));
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
}
return getIntegerSCEV(0, V->getType());
else if (isa<UndefValue>(V))
return getIntegerSCEV(0, V->getType());
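+  // Look through global aliases, unless the alias may be overridden at
+  // link time, in which case the aliasee can't be relied upon.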
+ else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
else
return getUnknown(V);
if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
return
getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
- IntegerType::get(BitWidth - LZ)),
+ IntegerType::get(getContext(), BitWidth - LZ)),
U->getType());
}
break;
return getIntegerSCEV(0, U->getType()); // value is undefined
return
getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
- IntegerType::get(Amt)),
+ IntegerType::get(getContext(), Amt)),
U->getType());
}
break;
// expressions we handle are GEPs and address literals.
case Instruction::GetElementPtr:
- if (!TD) break; // Without TD we can't analyze pointers.
return createNodeForGEP(U);
case Instruction::PHI:
      // count information isn't going to change anything. In the latter
// case, createNodeForPHI will perform the necessary updates on its
// own when it gets to that point.
- if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
+ if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
+ ValuesAtScopes.erase(It->second);
Scalars.erase(It);
- ValuesAtScopes.erase(I);
+ }
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
std::map<SCEVCallbackVH, const SCEV*>::iterator It =
Scalars.find(static_cast<Value *>(I));
if (It != Scalars.end()) {
+ ValuesAtScopes.erase(It->second);
Scalars.erase(It);
- ValuesAtScopes.erase(I);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
if (!isa<SCEVCouldNotCompute>(TC)) return TC;
break;
}
- case ICmpInst::ICMP_EQ: {
- // Convert to: while (X-Y == 0) // while (X == Y)
+ case ICmpInst::ICMP_EQ: { // while (X == Y)
+ // Convert to: while (X-Y == 0)
const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
if (!isa<SCEVCouldNotCompute>(TC)) return TC;
break;
} else if (isa<ConstantAggregateZero>(Init)) {
if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
assert(Idx < STy->getNumElements() && "Bad struct index!");
- Init = Context.getNullValue(STy->getElementType(Idx));
+ Init = Constant::getNullValue(STy->getElementType(Idx));
} else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
if (Idx >= ATy->getNumElements()) return 0; // Bogus program
- Init = Context.getNullValue(ATy->getElementType());
+ Init = Constant::getNullValue(ATy->getElementType());
} else {
llvm_unreachable("Unknown constant aggregate type!");
}
// Make sure that it is really a constant global we are gepping, with an
// initializer, and make sure the first IDX is really 0.
GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
- if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
+ if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
!cast<Constant>(GEP->getOperand(1))->isNullValue())
return getCouldNotCompute();
}
}
-/// ComputeBackedgeTakenCountExhaustively - If the trip is known to execute a
+/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit
/// condition gets a value of ExitWhen (true or false). If we cannot
if (CondVal->getValue() == uint64_t(ExitWhen)) {
++NumBruteForceTripCountsComputed;
- return getConstant(Type::Int32Ty, IterationNum);
+ return getConstant(Type::getInt32Ty(getContext()), IterationNum);
}
// Compute the value of the PHI node for the next iteration.
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
- // FIXME: this should be turned into a virtual method on SCEV!
+ // Check to see if we've folded this expression at this loop before.
+ std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
+ std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
+ Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
+ if (!Pair.second)
+ return Pair.first->second ? Pair.first->second : V;
+
+ // Otherwise compute it.
+ const SCEV *C = computeSCEVAtScope(V, L);
+ Pair.first->second = C;
+ return C;
+}
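+
+/// computeSCEVAtScope - Do the work of getSCEVAtScope; getSCEVAtScope
+/// memoizes the result in the ValuesAtScopes map.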
+const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
if (isa<SCEVConstant>(V)) return V;
  // If this instruction is evolved from a constant-evolving PHI, try to fold
  // the arguments into constants, and if so, try to constant propagate the
// result. This is particularly useful for computing loop exit values.
if (CanConstantFold(I)) {
- // Check to see if we've folded this instruction at this loop before.
- std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
- std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
- Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
- if (!Pair.second)
- return Pair.first->second ? &*getSCEV(Pair.first->second) : V;
-
std::vector<Constant*> Operands;
Operands.reserve(I->getNumOperands());
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
getContext());
else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(),
+ &Operands[0], Operands.size(),
getContext());
- Pair.first->second = C;
return getSCEV(C);
}
}
return getTruncateExpr(Op, Cast->getType());
}
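+  // sizeof and offsetof expressions don't vary from scope to scope.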
+ if (isa<SCEVTargetDataConstant>(V))
+ return V;
+
llvm_unreachable("Unknown SCEV type!");
return 0;
}
// First, handle unitary steps.
if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
- return getNegativeSCEV(Start); // N = -Start (as unsigned)
+ return getNegativeSCEV(Start); // N = -Start (as unsigned)
if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
return Start; // N = Start (as unsigned)
#endif
// Pick the smallest positive root value.
if (ConstantInt *CB =
- dyn_cast<ConstantInt>(getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
+ dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
R1->getValue(), R2->getValue()))) {
if (CB->getZExtValue() == false)
std::swap(R1, R2); // R1 is the minimum root now.
if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
- if (AI->isIdenticalTo(BI))
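+        // Identical instructions that may read memory (e.g. two loads of
+        // the same pointer) can still observe different values.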
+ if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
return true;
// Otherwise assume they may have a different value.
// Check Add for unsigned overflow.
// TODO: More sophisticated things could be done here.
- const Type *WideTy = getContext().getIntegerType(getTypeSizeInBits(Ty) + 1);
+ const Type *WideTy = IntegerType::get(getContext(),
+ getTypeSizeInBits(Ty) + 1);
const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
if (R1) {
// Pick the smallest positive root value.
if (ConstantInt *CB =
- dyn_cast<ConstantInt>(
- SE.getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
+ dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
R1->getValue(), R2->getValue()))) {
if (CB->getZExtValue() == false)
std::swap(R1, R2); // R1 is the minimum root now.
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
- SE->ValuesAtScopes.erase(I);
SE->Scalars.erase(getValPtr());
// this now dangles!
}
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- if (Instruction *I = dyn_cast<Instruction>(U))
- SE->ValuesAtScopes.erase(I);
SE->Scalars.erase(U);
for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
UI != UE; ++UI)
if (DeleteOld) {
if (PHINode *PN = dyn_cast<PHINode>(Old))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- if (Instruction *I = dyn_cast<Instruction>(Old))
- SE->ValuesAtScopes.erase(I);
SE->Scalars.erase(Old);
// this now dangles!
}
PrintLoopInfo(OS, &SE, *I);
}
-void ScalarEvolution::print(std::ostream &o, const Module *M) const {
- raw_os_ostream OS(o);
- print(OS, M);
-}