#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
- const TargetData *TD;
- const Type *UIntPtrTy;
bool Changed;
/// IVUsesByStride - Keep track of all uses of induction variables that we
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<LoopInfo>();
AU.addRequired<DominatorTree>();
- AU.addRequired<TargetData>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<ScalarEvolution>();
}
-private:
+ private:
bool AddUsersIfInteresting(Instruction *I, Loop *L,
SmallPtrSet<Instruction*,16> &Processed);
ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
// This is very common, put it first.
if (isa<SCEVConstant>(S))
return false;
- if (SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
+ if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
return true;
return false;
}
- if (SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
+ if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
if (const Loop *newLoop = AE->getLoop()) {
if (newLoop == L)
return false;
}
return true;
}
- if (SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
+ if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
// SCEVSDivExpr has been backed out temporarily, but will be back; we'll
// need this when it is.
- if (SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
+ if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
- if (SCEVTruncateExpr *TE = dyn_cast<SCEVTruncateExpr>(S))
- return containsAddRecFromDifferentLoop(TE->getOperand(), L);
- if (SCEVZeroExtendExpr *ZE = dyn_cast<SCEVZeroExtendExpr>(S))
- return containsAddRecFromDifferentLoop(ZE->getOperand(), L);
- if (SCEVSignExtendExpr *SE = dyn_cast<SCEVSignExtendExpr>(S))
- return containsAddRecFromDifferentLoop(SE->getOperand(), L);
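+  // SCEVCastExpr is the common base class of SCEVTruncateExpr,
+  // SCEVZeroExtendExpr, and SCEVSignExtendExpr, so a single check covers
+  // all three cast kinds.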
+ if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
+ return containsAddRecFromDifferentLoop(CE->getOperand(), L);
return false;
}
// If the outer level is an AddExpr, the operands are all start values except
// for a nested AddRecExpr.
- if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
+ if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
- if (SCEVAddRecExpr *AddRec =
+ if (const SCEVAddRecExpr *AddRec =
dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
if (AddRec->getLoop() == L)
TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
return false; // not analyzable.
}
- SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
if (!AddRec || AddRec->getLoop() != L) return false;
// FIXME: Generalize to non-affine IV's.
}
// Okay, all uses of IV by PN are in predecessor blocks that really are
- // dominated by the latch block. Split the critical edges and use the
- // post-incremented value.
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == IV) {
- SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
- // Splitting the critical edge can reduce the number of entries in this
- // PHI.
- e = PN->getNumIncomingValues();
- if (--NumUses == 0) break;
- }
-
- // PHI node might have become a constant value after SplitCriticalEdge.
- DeadInsts.push_back(User);
-
+ // dominated by the latch block. Use the post-incremented value.
return true;
}
/// return true. Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
SmallPtrSet<Instruction*,16> &Processed) {
- if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
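+  // isSCEVable is true for integer types, and for pointer types when
+  // pointer-size information is available; it subsumes the explicit
+  // integer/pointer test.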
+ if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
  // LSR is not APInt clean; do not touch integers bigger than 64 bits.
- if (TD->getTypeSizeInBits(I->getType()) > 64)
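+  // SE->getTypeSizeInBits consults TargetData when it is available and
+  // falls back to the primitive size for integers, so LSR no longer needs
+  // TD directly here.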
+ if (SE->getTypeSizeInBits(I->getType()) > 64)
return false;
if (!Processed.insert(I))
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
const TargetLowering *TLI, bool HasBaseReg) {
- if (SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
int64_t VC = SC->getValue()->getSExtValue();
if (TLI) {
TargetLowering::AddrMode AM;
}
}
- if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
+ if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
- TargetLowering::AddrMode AM;
- AM.BaseGV = GV;
- AM.HasBaseReg = HasBaseReg;
- return TLI->isLegalAddressingMode(AM, UseTy);
+ if (TLI) {
+ TargetLowering::AddrMode AM;
+ AM.BaseGV = GV;
+ AM.HasBaseReg = HasBaseReg;
+ return TLI->isLegalAddressingMode(AM, UseTy);
+ } else {
+ // Default: assume global addresses are not legal.
+ }
}
return false;
Loop *L, ScalarEvolution *SE) {
if (Val->isLoopInvariant(L)) return; // Nothing to do.
- if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
+ if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
std::vector<SCEVHandle> NewOps;
NewOps.reserve(SAE->getNumOperands());
Val = SE->getIntegerSCEV(0, Val->getType());
else
Val = SE->getAddExpr(NewOps);
- } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
+ } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
// Try to pull immediates out of the start value of nested addrec's.
SCEVHandle Start = SARE->getStart();
MoveLoopVariantsToImmediateField(Start, Imm, L, SE);
SCEVHandle &Val, SCEVHandle &Imm,
bool isAddress, Loop *L,
ScalarEvolution *SE) {
- if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
+ if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
std::vector<SCEVHandle> NewOps;
NewOps.reserve(SAE->getNumOperands());
else
Val = SE->getAddExpr(NewOps);
return;
- } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
+ } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
// Try to pull immediates out of the start value of nested addrec's.
SCEVHandle Start = SARE->getStart();
MoveImmediateValues(TLI, UseTy, Start, Imm, isAddress, L, SE);
Val = SE->getAddRecExpr(Ops, SARE->getLoop());
}
return;
- } else if (SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
+ } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
// Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
SCEVHandle Expr,
ScalarEvolution *SE) {
- if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
+ if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
- } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
+ } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
if (SARE->getOperand(0) == Zero) {
SubExprs.push_back(Expr);
continue;
TargetLowering::AddrMode AM;
- if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
AM.BaseOffs = SC->getValue()->getSExtValue();
AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
AM.Scale = Scale;
/// a nop.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
const Type *Ty2) {
+ if (Ty1 == Ty2)
+ return false;
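+  // getEffectiveSCEVType maps a pointer type to the pointer-width integer
+  // type, so a pointer and its corresponding intptr type compare equal
+  // below; this subsumes the UIntPtrTy bitcast checks removed further down.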
+ Ty1 = SE->getEffectiveSCEVType(Ty1);
+ Ty2 = SE->getEffectiveSCEVType(Ty2);
if (Ty1 == Ty2)
return false;
if (Ty1->canLosslesslyBitCastTo(Ty2))
return false;
if (TLI && TLI->isTruncateFree(Ty1, Ty2))
return false;
- if (isa<PointerType>(Ty2) && Ty1->canLosslesslyBitCastTo(UIntPtrTy))
- return false;
- if (isa<PointerType>(Ty1) && Ty2->canLosslesslyBitCastTo(UIntPtrTy))
- return false;
return true;
}
const SCEVHandle &Stride,
IVExpr &IV, const Type *Ty,
const std::vector<BasedUser>& UsersToProcess) {
- if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
int64_t SInt = SC->getValue()->getSExtValue();
for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
++NewStride) {
IVsByStride.find(StrideOrder[NewStride]);
if (SI == IVsByStride.end())
continue;
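+    // Look for an existing IV whose stride is -1 * Stride; such an IV can
+    // potentially be reused with a negative scale.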
- if (SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
- if (SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
+ if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
if (Stride == ME->getOperand(1) &&
SC->getValue()->getSExtValue() == -1LL)
for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
- SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
+ const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
if (!Mul) return false;
// If there is a constant factor, it will be first.
- SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
+ const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
if (!SC) return false;
// Return true if the value is negative, this matches things like (-42 * V).
// Iterate through the uses to find conditions that automatically rule out
// full-lsr mode.
for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
- SCEV *Base = UsersToProcess[i].Base;
- SCEV *Imm = UsersToProcess[i].Imm;
+ const SCEV *Base = UsersToProcess[i].Base;
+ const SCEV *Imm = UsersToProcess[i].Imm;
// If any users have a loop-variant component, they can't be fully
// strength-reduced.
if (Imm && !Imm->isLoopInvariant(L))
// the two Imm values can't be folded into the address, full
// strength reduction would increase register pressure.
do {
- SCEV *CurImm = UsersToProcess[i].Imm;
+ const SCEV *CurImm = UsersToProcess[i].Imm;
if ((CurImm || Imm) && CurImm != Imm) {
if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
///
static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
const Loop *L,
- const TargetData *TD,
SCEVExpander &Rewriter) {
assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
BasicBlock *Preheader = L->getLoopPreheader();
BasicBlock *LatchBlock = L->getLoopLatch();
const Type *Ty = Start->getType();
- if (isa<PointerType>(Ty)) Ty = TD->getIntPtrType();
+ Ty = Rewriter.SE.getEffectiveSCEVType(Ty);
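+  // For a pointer-typed start value this yields the pointer-width integer
+  // type, so the new PHI is always created as an integer IV.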
PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
SCEVHandle Imm = UsersToProcess[i].Imm;
SCEVHandle Base = UsersToProcess[i].Base;
SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
- PHINode *Phi = InsertAffinePhi(Start, Stride, L, TD,
+ PHINode *Phi = InsertAffinePhi(Start, Stride, L,
PreheaderRewriter);
// Loop over all the users with the same base.
do {
DOUT << " Inserting new PHI:\n";
PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
- Stride, L, TD,
+ Stride, L,
PreheaderRewriter);
    // Remember this in case a later stride is a multiple of this one.
// a register operand, which potentially restricts what stride values are
// valid.
bool HaveCommonExprs = !CommonExprs->isZero();
-
const Type *ReplacedTy = CommonExprs->getType();
- if (isa<PointerType>(ReplacedTy)) ReplacedTy = TD->getIntPtrType();
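+  // ReplacedTy may now legitimately be a pointer type; the code below uses
+  // SE->getTypeSizeInBits and getEffectiveSCEVType instead of TD to handle
+  // that case.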
// If all uses are addresses, consider sinking the immediate part of the
// common expression back into uses if they can fit in the immediate fields.
// If the immediate part of the common expression is a GV, check if it's
// possible to fold it into the target addressing mode.
GlobalValue *GV = 0;
- if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
+ if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
GV = dyn_cast<GlobalValue>(SU->getValue());
int64_t Offset = 0;
- if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
Offset = SC->getValue()->getSExtValue();
if (GV || Offset)
// Pass VoidTy as the AccessTy to be conservative, because
<< *Stride << ":\n"
<< " Common base: " << *CommonExprs << "\n";
- SCEVExpander Rewriter(*SE, *LI, *TD);
- SCEVExpander PreheaderRewriter(*SE, *LI, *TD);
+ SCEVExpander Rewriter(*SE, *LI);
+ SCEVExpander PreheaderRewriter(*SE, *LI);
BasicBlock *Preheader = L->getLoopPreheader();
Instruction *PreInsertPt = Preheader->getTerminator();
BasicBlock *LatchBlock = L->getLoopLatch();
- Value *CommonBaseV = ConstantInt::get(ReplacedTy, 0);
+ Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
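+  // ReplacedTy may be a pointer type, and ConstantInt::get cannot produce
+  // a pointer constant; Constant::getNullValue yields a zero of any type.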
SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
DOUT << " Examining use ";
DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
/*PrintType=*/false));
- DOUT << " in Inst: " << *Inst;
+ DOUT << " in Inst: " << *(User.Inst);
// If this instruction wants to use the post-incremented value, move it
// after the post-inc and use its value instead of the PHI.
SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);
- if (TD->getTypeSizeInBits(RewriteOp->getType()) !=
- TD->getTypeSizeInBits(ReplacedTy)) {
- assert(TD->getTypeSizeInBits(RewriteOp->getType()) >
- TD->getTypeSizeInBits(ReplacedTy) &&
+ if (SE->getTypeSizeInBits(RewriteOp->getType()) !=
+ SE->getTypeSizeInBits(ReplacedTy)) {
+ assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
+ SE->getTypeSizeInBits(ReplacedTy) &&
"Unexpected widening cast!");
RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
}
// it here.
if (!ReuseIV.Base->isZero()) {
SCEVHandle typedBase = ReuseIV.Base;
- if (RewriteExpr->getType()->getPrimitiveSizeInBits() !=
- ReuseIV.Base->getType()->getPrimitiveSizeInBits()) {
+ if (SE->getTypeSizeInBits(RewriteExpr->getType()) !=
+ SE->getTypeSizeInBits(ReuseIV.Base->getType())) {
// It's possible the original IV is a larger type than the new IV,
// in which case we have to truncate the Base. We checked in
// RequiresTypeConversion that this is valid.
- assert (RewriteExpr->getType()->getPrimitiveSizeInBits() <
- ReuseIV.Base->getType()->getPrimitiveSizeInBits() &&
- "Unexpected lengthening conversion!");
+ assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
+ SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
+ "Unexpected lengthening conversion!");
typedBase = SE->getTruncateExpr(ReuseIV.Base,
RewriteExpr->getType());
}
// e.g.
// 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
struct StrideCompare {
- const TargetData *TD;
- explicit StrideCompare(const TargetData *td) : TD(td) {}
+ const ScalarEvolution *SE;
+ explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
- SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
- SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
+ const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
+ const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
if (LHSC && RHSC) {
int64_t LV = LHSC->getValue()->getSExtValue();
int64_t RV = RHSC->getValue()->getSExtValue();
// If it's the same value but different type, sort by bit width so
// that we emit larger induction variables before smaller
// ones, letting the smaller be re-written in terms of larger ones.
- return TD->getTypeSizeInBits(RHS->getType()) <
- TD->getTypeSizeInBits(LHS->getType());
+ return SE->getTypeSizeInBits(RHS->getType()) <
+ SE->getTypeSizeInBits(LHS->getType());
}
return LHSC && !RHSC;
}
ICmpInst::Predicate Predicate = Cond->getPredicate();
int64_t CmpSSInt = SC->getValue()->getSExtValue();
- unsigned BitWidth = TD->getTypeSizeInBits((*CondStride)->getType());
+ unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
uint64_t SignBit = 1ULL << (BitWidth-1);
const Type *CmpTy = Cond->getOperand(0)->getType();
const Type *NewCmpTy = NULL;
- unsigned TyBits = TD->getTypeSizeInBits(CmpTy);
+ unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
unsigned NewTyBits = 0;
SCEVHandle *NewStride = NULL;
Value *NewCmpLHS = NULL;
Value *NewCmpRHS = NULL;
int64_t Scale = 1;
- SCEVHandle NewOffset = SE->getIntegerSCEV(0, UIntPtrTy);
+ SCEVHandle NewOffset = SE->getIntegerSCEV(0, CmpTy);
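+  // Offsets are now computed in the compare's own type rather than the
+  // target's pointer type.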
if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
int64_t CmpVal = C->getValue().getSExtValue();
if (!isa<SCEVConstant>(SI->first))
continue;
int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
- if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
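+    // Unlike the old abs(SSInt) <= abs(CmpSSInt) test, this accepts a
+    // stride that is the exact negation of the compare stride (yielding
+    // Scale == -1) while still rejecting equal and smaller strides.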
+ if (SSInt == CmpSSInt ||
+ abs(SSInt) < abs(CmpSSInt) ||
+ (SSInt % CmpSSInt) != 0)
continue;
Scale = SSInt / CmpSSInt;
continue;
NewCmpTy = NewCmpLHS->getType();
- NewTyBits = TD->getTypeSizeInBits(NewCmpTy);
+ NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
+ const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
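+    // If NewCmpTy turns out to be a pointer, constants are built in this
+    // same-width integer type and converted with inttoptr (see below).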
if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
// Check if it is possible to rewrite it using
// an iv / stride of a smaller integer type.
if (!isa<PointerType>(NewCmpTy))
NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
else {
- ConstantInt *CI = ConstantInt::get(UIntPtrTy, NewCmpVal);
+ ConstantInt *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
}
NewOffset = TyBits == NewTyBits
? SE->getMulExpr(CondUse->Offset,
SE->getConstant(ConstantInt::get(CmpTy, Scale)))
- : SE->getConstant(ConstantInt::get(NewCmpTy,
+ : SE->getConstant(ConstantInt::get(NewCmpIntTy,
cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
break;
}
CondUse = &IVUsesByStride[*NewStride].Users.back();
CondStride = NewStride;
++NumEliminated;
+ Changed = true;
}
return Cond;
// Check the relevant induction variable for conformance to
// the pattern.
SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
- SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
if (!AR || !AR->isAffine() ||
AR->getStart() != One ||
AR->getStepRecurrence(*SE) != One)
const Type *SrcTy = PH->getType();
int Mantissa = DestTy->getFPMantissaWidth();
if (Mantissa == -1) continue;
- if ((int)TD->getTypeSizeInBits(SrcTy) > Mantissa)
+ if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
continue;
unsigned Entry, Latch;
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>();
- TD = &getAnalysis<TargetData>();
- UIntPtrTy = TD->getIntPtrType();
Changed = false;
// Find all uses of induction variables in this loop, and categorize
#endif
// Sort the StrideOrder so we process larger strides first.
- std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare(TD));
+ std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare(SE));
// Optimize induction variables. Some indvar uses can be transformed to use
// strides that will be needed for other purposes. A common example of this
StrideOrder.clear();
// Clean up after ourselves
- if (!DeadInsts.empty()) {
+ if (!DeadInsts.empty())
DeleteTriviallyDeadInstructions();
- BasicBlock::iterator I = L->getHeader()->begin();
- while (PHINode *PN = dyn_cast<PHINode>(I++)) {
- // At this point, we know that we have killed one or more IV users.
- // It is worth checking to see if the canonical indvar is also
- // dead, so that we can remove it as well.
- //
- // We can remove a PHI if it is on a cycle in the def-use graph
- // where each node in the cycle has degree one, i.e. only one use,
- // and is an instruction with no side effects.
- //
- // FIXME: this needs to eliminate an induction variable even if it's being
- // compared against some value to decide loop termination.
- if (!PN->hasOneUse())
- continue;
-
- SmallPtrSet<PHINode *, 4> PHIs;
- for (Instruction *J = dyn_cast<Instruction>(*PN->use_begin());
- J && J->hasOneUse() && !J->mayWriteToMemory();
- J = dyn_cast<Instruction>(*J->use_begin())) {
- // If we find the original PHI, we've discovered a cycle.
- if (J == PN) {
- // Break the cycle and mark the PHI for deletion.
- SE->deleteValueFromRecords(PN);
- PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
- DeadInsts.push_back(PN);
- Changed = true;
- break;
- }
- // If we find a PHI more than once, we're on a cycle that
- // won't prove fruitful.
- if (isa<PHINode>(J) && !PHIs.insert(cast<PHINode>(J)))
- break;
- }
+ // At this point, it is worth checking to see if any recurrence PHIs are also
+ // dead, so that we can remove them as well. To keep ScalarEvolution
+ // current, use a ValueDeletionListener class.
+ struct LSRListener : public ValueDeletionListener {
+ ScalarEvolution &SE;
+ explicit LSRListener(ScalarEvolution &se) : SE(se) {}
+
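+    // Called just before V is erased; drop V from ScalarEvolution's maps
+    // so no stale pointers remain.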
+ virtual void ValueWillBeDeleted(Value *V) {
+ SE.deleteValueFromRecords(V);
}
- DeleteTriviallyDeadInstructions();
- }
+ } VDL(*SE);
+ DeleteDeadPHIs(L->getHeader(), &VDL);
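+  // DeleteDeadPHIs scans the header's PHI nodes and removes those that are
+  // trivially dead, including the self-referential cycles the removed code
+  // above walked by hand, notifying the listener for each deleted value.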
+
return Changed;
}