#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
uint32_t lookup(Value *V) const;
uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
+ bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
void erase(Value *v);
}
}
+/// Returns true if a value number exists for the specified value.
+bool ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
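+// Illustrative use (mirroring the scalar-PRE code later in this file):
+// callers check exists() before lookup() so that operands of freshly
+// inserted instructions, which have no value number yet, are skipped:
+//   if (!VN.exists(Op)) break;      // not value-numbered yet
+//   uint32_t Num = VN.lookup(Op);   // safe: a number exists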
+
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
/// Emit code into this block to adjust the value defined here to the
/// specified type. This handles various coercion cases.
- Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const;
+ Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const;
};
class GVN : public FunctionPass {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const DataLayout *DL;
const TargetLibraryInfo *TLI;
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
BumpPtrAllocator TableAllocator;
+ // Block-local map of equivalent values to their leader; it does not
+ // propagate to any successors. Entries added mid-block are applied
+ // to the remaining instructions in the block.
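+ // Illustrative: after processAssumeIntrinsic sees
+ //   %cmp = icmp eq i32 %x, 0
+ //   call void @llvm.assume(i1 %cmp)
+ // the map holds %cmp -> true (and %x -> 0), and the remaining
+ // instructions of the block are rewritten accordingly.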
+ SmallMapVector<llvm::Value *, llvm::Constant *, 4> ReplaceWithConstMap;
SmallVector<Instruction*, 8> InstrsToErase;
typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
InstrsToErase.push_back(I);
}
- const DataLayout *getDataLayout() const { return DL; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
LeaderTableEntry* Prev = nullptr;
LeaderTableEntry* Curr = &LeaderTable[N];
- while (Curr->Val != I || Curr->BB != BB) {
+ while (Curr && (Curr->Val != I || Curr->BB != BB)) {
Prev = Curr;
Curr = Curr->Next;
}
+ if (!Curr)
+ return;
+
if (Prev) {
Prev->Next = Curr->Next;
} else {
AU.addRequired<TargetLibraryInfoWrapperPass>();
if (!NoLoads)
AU.addRequired<MemoryDependenceAnalysis>();
- AU.addRequired<AliasAnalysis>();
+ AU.addRequired<AAResultsWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
}
- // Helper fuctions of redundant load elimination
+ // Helper functions of redundant load elimination
bool processLoad(LoadInst *L);
bool processNonLocalLoad(LoadInst *L);
+ bool processAssumeIntrinsic(IntrinsicInst *II);
void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks);
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
- unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlockEdge &Root);
- bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
+ bool replaceOperandsWithConsts(Instruction *I) const;
+ bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
+ bool DominatesByEdge);
bool processFoldableCondBr(BranchInst *BI);
void addDeadBlock(BasicBlock *BB);
void assignValNumForDeadCode();
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
-/// the stored value. LoadedTy is the type of the load we want to replace and
-/// InsertPt is the place to insert new instructions.
+/// the stored value. LoadedTy is the type of the load we want to replace.
+/// IRB is the IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
-static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
- Type *LoadedTy,
- Instruction *InsertPt,
+static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+ IRBuilder<> &IRB,
const DataLayout &DL) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
return nullptr;
// Pointer to Pointer -> use bitcast.
if (StoredValTy->getScalarType()->isPointerTy() &&
LoadedTy->getScalarType()->isPointerTy())
- return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy);
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
Type *TypeToCastTo = LoadedTy;
TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
- StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);
// Cast to pointer if the load needs a pointer type.
if (LoadedTy->getScalarType()->isPointerTy())
- StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
+ StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy);
return StoredVal;
}
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
// Convert vectors and fp to integer, which can be manipulated.
if (!StoredValTy->isIntegerTy()) {
StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
- StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy);
}
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
if (DL.isBigEndian()) {
- Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
- StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
+ StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp");
}
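  // E.g., when an i64 store feeds an i32 load on a big-endian target, the
  // four interesting bytes occupy the high half of the stored value, so the
  // lshr above shifts them down by 32 before the trunc below.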
// Truncate the integer to the right size now.
Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
- StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
+ StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");
if (LoadedTy == NewIntTy)
return StoredVal;
// If the result is a pointer, inttoptr.
if (LoadedTy->getScalarType()->isPointerTy())
- return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
+ return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr");
// Otherwise, bitcast.
- return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast");
}
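// Illustrative IR for the pointer paths above (assuming 64-bit pointers):
// coercing a stored "i8* %p" to a load of type i64 emits
//   %1 = ptrtoint i8* %p to i64
// while equal-sized non-integer types are handled with a single bitcast.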
/// This function is called when we have a
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
+ Value *StoreBase =
+ GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
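+  // E.g., if WritePtr is "getelementptr i8, i8* %base, i64 4" and LoadPtr is
+  // "getelementptr i8, i8* %base, i64 8", both decompose to %base with byte
+  // offsets 4 and 8.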
if (StoreBase != LoadBase)
return -1;
/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
- StoreInst *DepSI,
- const DataLayout &DL) {
+ StoreInst *DepSI) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
+ const DataLayout &DL = DepSI->getModule()->getDataLayout();
Value *StorePtr = DepSI->getPointerOperand();
uint64_t StoreSize = DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
- unsigned Size = MemoryDependenceAnalysis::
- getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
+ unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
+ LoadBase, LoadOffs, LoadSize, DepLI);
if (Size == 0) return -1;
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (!Src) return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
if (!GV || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
Type::getInt8PtrTy(Src->getContext(), AS));
Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
- Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
+ Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
+ OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- if (ConstantFoldLoadFromConstPtr(Src, &DL))
+ if (ConstantFoldLoadFromConstPtr(Src, DL))
return Offset;
return -1;
}
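// Illustrative: for a memcpy whose source is a constant global, the code
// above offsets the source with "getelementptr (i8, ...)", bitcasts it to
// LoadTy*, and returns Offset only if ConstantFoldLoadFromConstPtr can fold
// a load of LoadTy from that address.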
uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}
/// This function is called when we have a
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const DataLayout &DL = *gvn.getDataLayout();
+ const DataLayout &DL = SrcVal->getModule()->getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be widening
// SrcVal out to a larger load.
unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// We know that this method is only called when the mem transfer fully
// provides the bits for the load.
++NumBytesSet;
}
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
}
// Otherwise, this is a memcpy/memmove from a constant global.
Type::getInt8PtrTy(Src->getContext(), AS));
Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
- Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
+ Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
+ OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- return ConstantFoldLoadFromConstPtr(Src, &DL);
+ return ConstantFoldLoadFromConstPtr(Src, DL);
}
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent())) {
assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominates this block");
- return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
+ return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
}
// Otherwise, we have to construct SSA form.
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
- Type *LoadTy = LI->getType();
-
- for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
- const AvailableValueInBlock &AV = ValuesPerBlock[i];
+ for (const AvailableValueInBlock &AV : ValuesPerBlock) {
BasicBlock *BB = AV.BB;
if (SSAUpdate.HasValueForBlock(BB))
continue;
- SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
+ SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
}
// Perform PHI construction.
- Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
-
- // If new PHI nodes were created, notify alias analysis.
- if (V->getType()->getScalarType()->isPointerTy()) {
- AliasAnalysis *AA = gvn.getAliasAnalysis();
-
- for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
- AA->copyValue(LI, NewPHIs[i]);
-
- // Now that we've copied information to the new PHIs, scan through
- // them again and inform alias analysis that we've added potentially
- // escaping uses to any values that are operands to these PHIs.
- for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
- PHINode *P = NewPHIs[i];
- for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii) {
- unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
- AA->addEscapingUse(P->getOperandUse(jj));
- }
- }
- }
-
- return V;
+ return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}
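// E.g., if the load's value is available as %v1 from %bb1 and %v2 from
// %bb2, the updater materializes "phi [ %v1, %bb1 ], [ %v2, %bb2 ]" in the
// load's block and returns it.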
-Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
+Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI,
+ GVN &gvn) const {
Value *Res;
+ Type *LoadTy = LI->getType();
+ const DataLayout &DL = LI->getModule()->getDataLayout();
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const DataLayout *DL = gvn.getDataLayout();
- assert(DL && "Need target data to handle type mismatch case");
- Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
- *DL);
-
+ Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL);
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
- const DataLayout *DL = gvn.getDataLayout();
- assert(DL && "Need target data to handle type mismatch case");
- Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
- LoadTy, BB->getTerminator(), *DL);
+ Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
+ BB->getTerminator(), DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
// dependencies that produce an unknown value for the load (such as a call
// that could potentially clobber the load).
unsigned NumDeps = Deps.size();
+ const DataLayout &DL = LI->getModule()->getDataLayout();
for (unsigned i = 0, e = NumDeps; i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (DL && Address) {
- int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
- DepSI, *DL);
+ if (Address) {
+ int Offset =
+ AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getValueOperand(),
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
- if (DepLI != LI && Address && DL) {
- int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
- DepLI, *DL);
+ if (DepLI != LI && Address) {
+ int Offset =
+ AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (DL && Address) {
+ if (Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
- DepMI, *DL);
+ DepMI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger than or equal to the loaded value, we can
// reuse it.
- if (!DL || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), *DL)) {
+ if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+ LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
if (LD->getType() != LI->getType()) {
// If the stored value is larger than or equal to the loaded value, we can
// reuse it.
- if (!DL || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)) {
+ if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
// that we only have to insert *one* load (which means we're basically moving
// the load, not inserting a new one).
- SmallPtrSet<BasicBlock *, 4> Blockers;
- for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
- Blockers.insert(UnavailableBlocks[i]);
+ SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
+ UnavailableBlocks.end());
// Let's find the first basic block with more than one predecessor. Walk
// backwards through predecessors if needed.
// available.
MapVector<BasicBlock *, Value *> PredLoads;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
- for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
- FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
- for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
- FullyAvailableBlocks[UnavailableBlocks[i]] = false;
+ for (const AvailableValueInBlock &AV : ValuesPerBlock)
+ FullyAvailableBlocks[AV.BB] = true;
+ for (BasicBlock *UnavailableBB : UnavailableBlocks)
+ FullyAvailableBlocks[UnavailableBB] = false;
SmallVector<BasicBlock *, 4> CriticalEdgePred;
- for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
- PI != E; ++PI) {
- BasicBlock *Pred = *PI;
+ for (BasicBlock *Pred : predecessors(LoadBB)) {
+ // If any predecessor block is an EH pad that does not allow non-PHI
+ // instructions before the terminator, we can't PRE the load.
+ if (Pred->getTerminator()->isEHPad()) {
+ DEBUG(dbgs()
+ << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
+ << Pred->getName() << "': " << *LI << '\n');
+ return false;
+ }
+
if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
return false;
}
- if (LoadBB->isLandingPad()) {
+ if (LoadBB->isEHPad()) {
DEBUG(dbgs()
- << "COULD NOT PRE LOAD BECAUSE OF LANDING PAD CRITICAL EDGE '"
+ << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
+ const DataLayout &DL = LI->getModule()->getDataLayout();
SmallVector<Instruction*, 8> NewInsts;
for (auto &PredLoad : PredLoads) {
BasicBlock *UnavailablePred = PredLoad.first;
<< *NewInsts.back() << '\n');
// Assign value numbers to the new instructions.
- for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
+ for (Instruction *I : NewInsts) {
// FIXME: We really _ought_ to insert these value numbers into their
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
- VN.lookup_or_add(NewInsts[i]);
+ VN.lookup_or_add(I);
}
for (const auto &PredLoad : PredLoads) {
if (Tags)
NewLoad->setAAMetadata(Tags);
+ if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
+ NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
+ if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
+ NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
+
// Transfer DebugLoc.
NewLoad->setDebugLoc(LI->getDebugLoc());
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
+ // Non-local speculations are not allowed under asan.
+ if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeAddress))
+ return false;
+
// Step 1: Find the non-local dependencies of the load.
LoadDepVect Deps;
MD->getNonLocalPointerDependency(LI, Deps);
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (LI->getDebugLoc())
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}
+bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
+ assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
+ "This function can only be called with llvm.assume intrinsic");
+ Value *V = IntrinsicI->getArgOperand(0);
+
+ if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
+ if (Cond->isZero()) {
+ Type *Int8Ty = Type::getInt8Ty(V->getContext());
+ // Insert a store to null before the assume call to indicate that this
+ // code is not reachable. FIXME: We could insert an unreachable
+ // instruction directly because we can modify the CFG.
+ new StoreInst(UndefValue::get(Int8Ty),
+ Constant::getNullValue(Int8Ty->getPointerTo()),
+ IntrinsicI);
+ }
+ markInstructionForDeletion(IntrinsicI);
+ return false;
+ }
+
+ Constant *True = ConstantInt::getTrue(V->getContext());
+ bool Changed = false;
+
+ for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
+ BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
+
+ // This property is only true in dominated successors; propagateEquality
+ // will check dominance for us.
+ Changed |= propagateEquality(V, True, Edge, false);
+ }
+
+ // We can replace the assume's condition with true, which covers cases like this:
+ // call void @llvm.assume(i1 %cmp)
+ // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
+ ReplaceWithConstMap[V] = True;
+
+ // If one operand of an equality compare is a constant, adding it to the
+ // map covers cases like this:
+ // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
+ // call void @llvm.assume(i1 %cmp)
+ // ret float %0 ; will change it to ret float 3.000000e+00
+ if (auto *CmpI = dyn_cast<CmpInst>(V)) {
+ if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
+ CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
+ (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
+ CmpI->getFastMathFlags().noNaNs())) {
+ Value *CmpLHS = CmpI->getOperand(0);
+ Value *CmpRHS = CmpI->getOperand(1);
+ if (isa<Constant>(CmpLHS))
+ std::swap(CmpLHS, CmpRHS);
+ auto *RHSConst = dyn_cast<Constant>(CmpRHS);
+
+ // If only one operand is a constant.
+ if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
+ ReplaceWithConstMap[CmpLHS] = RHSConst;
+ }
+ }
+ return Changed;
+}
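+// For example, "call void @llvm.assume(i1 false)" takes the isZero() path
+// above: the store to null marks the point unreachable for later passes,
+// and the intrinsic itself is queued for deletion.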
static void patchReplacementInstruction(Instruction *I, Value *Repl) {
// Patch the replacement so that it is not more restrictive than the value
// being replaced.
BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
- if (Op && ReplOp && isa<OverflowingBinaryOperator>(Op) &&
- isa<OverflowingBinaryOperator>(ReplOp)) {
- if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap())
- ReplOp->setHasNoSignedWrap(false);
- if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap())
- ReplOp->setHasNoUnsignedWrap(false);
- }
+ if (Op && ReplOp)
+ ReplOp->andIRFlags(Op);
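+  // E.g., if I is "add i32 %x, %y" without nsw but Repl is
+  // "add nsw i32 %x, %y", the andIRFlags() call above drops nsw from Repl
+  // so the replacement makes no guarantee the original did not.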
+
if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
// FIXME: If both the original and replacement value are part of the
// same control-flow region (meaning that the execution of one
- // guarentees the executation of the other), then we can combine the
+ // guarantees the execution of the other), then we can combine the
// noalias scopes here and do better than the general conservative
// answer used in combineMetadata().
// In general, GVN unifies expressions over different control-flow
// regions, and so we need a conservative combination of the noalias
// scopes.
- unsigned KnownIDs[] = {
- LLVMContext::MD_tbaa,
- LLVMContext::MD_alias_scope,
- LLVMContext::MD_noalias,
- LLVMContext::MD_range,
- LLVMContext::MD_fpmath,
- LLVMContext::MD_invariant_load,
- };
+ static const unsigned KnownIDs[] = {
+ LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
+ LLVMContext::MD_noalias, LLVMContext::MD_range,
+ LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load,
+ LLVMContext::MD_invariant_group};
combineMetadata(ReplInst, I, KnownIDs);
}
}
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
+ const DataLayout &DL = L->getModule()->getDataLayout();
// If we have a clobber, see if it is one that we can fix up through code
// synthesis.
- if (Dep.isClobber() && DL) {
+ if (Dep.isClobber()) {
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
// access code.
Value *AvailVal = nullptr;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
- int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
- L->getPointerOperand(),
- DepSI, *DL);
+ int Offset = AnalyzeLoadFromClobberingStore(
+ L->getType(), L->getPointerOperand(), DepSI);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
- L->getType(), L, *DL);
+ L->getType(), L, DL);
}
// Check to see if we have something like this:
if (DepLI == L)
return false;
- int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
- L->getPointerOperand(),
- DepLI, *DL);
+ int Offset = AnalyzeLoadFromClobberingLoad(
+ L->getType(), L->getPointerOperand(), DepLI, DL);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
- int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
- L->getPointerOperand(),
- DepMI, *DL);
+ int Offset = AnalyzeLoadFromClobberingMemInst(
+ L->getType(), L->getPointerOperand(), DepMI, DL);
if (Offset != -1)
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL);
}
if (AvailVal) {
++NumGVNLoad;
return true;
}
- }
- // If the value isn't available, don't do anything!
- if (Dep.isClobber()) {
+ // If the value isn't available, don't do anything!
DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load ";
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
- if (DL) {
- StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
- L, *DL);
- if (!StoredVal)
- return false;
-
- DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
- << '\n' << *L << "\n\n\n");
- }
- else
+ IRBuilder<> Builder(L);
+ StoredVal =
+ CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL);
+ if (!StoredVal)
return false;
+
+ DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
+ << '\n' << *L << "\n\n\n");
}
// Remove it!
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- if (DL) {
- AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
- L, *DL);
- if (!AvailableVal)
- return false;
-
- DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
- << "\n" << *L << "\n\n\n");
- }
- else
+ IRBuilder<> Builder(L);
+ AvailableVal =
+ CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL);
+ if (!AvailableVal)
return false;
+
+ DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
+ << "\n" << *L << "\n\n\n");
}
// Remove it!
return Val;
}
-/// Replace all uses of 'From' with 'To' if the use is dominated by the given
-/// basic block. Returns the number of uses that were replaced.
-unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlockEdge &Root) {
- unsigned Count = 0;
- for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
- UI != UE; ) {
- Use &U = *UI++;
-
- if (DT->dominates(Root, U)) {
- U.set(To);
- ++Count;
- }
- }
- return Count;
-}
-
/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
return Pred != nullptr;
}
+// Tries to replace an instruction's operands with constants, using the
+// information in ReplaceWithConstMap.
+bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
+ bool Changed = false;
+ for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
+ Value *Operand = Instr->getOperand(OpNum);
+ auto it = ReplaceWithConstMap.find(Operand);
+ if (it != ReplaceWithConstMap.end()) {
+ assert(!isa<Constant>(Operand) &&
+ "Replacing constants with constants is invalid");
+ DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second
+ << " in instruction " << *Instr << '\n');
+ Instr->setOperand(OpNum, it->second);
+ Changed = true;
+ }
+ }
+ return Changed;
+}
+
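+// Illustrative: with ReplaceWithConstMap = { %cmp -> true }, a subsequent
+// "br i1 %cmp, label %a, label %b" in the same block is rewritten in place
+// to branch on true, which processFoldableCondBr can then fold.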
/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
-bool GVN::propagateEquality(Value *LHS, Value *RHS,
- const BasicBlockEdge &Root) {
+/// If DominatesByEdge is false, the scope is instead every use dominated
+/// by Root.getEnd().
+bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
+ bool DominatesByEdge) {
SmallVector<std::pair<Value*, Value*>, 4> Worklist;
Worklist.push_back(std::make_pair(LHS, RHS));
bool Changed = false;
std::pair<Value*, Value*> Item = Worklist.pop_back_val();
LHS = Item.first; RHS = Item.second;
- if (LHS == RHS) continue;
+ if (LHS == RHS)
+ continue;
assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
// Don't try to propagate equalities between constants.
- if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue;
+ if (isa<Constant>(LHS) && isa<Constant>(RHS))
+ continue;
// Prefer a constant on the right-hand side, or an Argument if no constants.
if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
// As LHS always has at least one use that is not dominated by Root, this
// will never do anything if LHS has only one use.
if (!LHS->hasOneUse()) {
- unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root);
+ unsigned NumReplacements =
+ DominatesByEdge
+ ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
+ : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getEnd());
+
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
}
Value *NotCmp = findLeader(Root.getEnd(), Num);
if (NotCmp && isa<Instruction>(NotCmp)) {
unsigned NumReplacements =
- replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
+ DominatesByEdge
+ ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
+ : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
+ Root.getEnd());
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
}
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->getScalarType()->isPointerTy())
return true;
}
+ if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
+ if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
+ return processAssumeIntrinsic(IntrinsicI);
+
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (processLoad(LI))
return true;
Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
BasicBlockEdge TrueE(Parent, TrueSucc);
- Changed |= propagateEquality(BranchCond, TrueVal, TrueE);
+ Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
BasicBlockEdge FalseE(Parent, FalseSucc);
- Changed |= propagateEquality(BranchCond, FalseVal, FalseE);
+ Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
return Changed;
}
// If there is only a single edge, propagate the case value into it.
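// E.g., for "switch i32 %x" where case 7 is the only edge into %bb, uses
// of %x dominated by that edge can be rewritten to the constant 7.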
if (SwitchEdges.lookup(Dst) == 1) {
BasicBlockEdge E(Parent, Dst);
- Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
+ Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E, true);
}
}
return Changed;
// Instructions with void type don't return a value, so there's
// no point in trying to find redundancies in them.
- if (I->getType()->isVoidTy()) return false;
+ if (I->getType()->isVoidTy())
+ return false;
uint32_t NextNum = VN.getNextUnusedValueNumber();
unsigned Num = VN.lookup_or_add(I);
// Perform fast-path value-number based elimination of values inherited from
// dominators.
- Value *repl = findLeader(I->getParent(), Num);
- if (!repl) {
+ Value *Repl = findLeader(I->getParent(), Num);
+ if (!Repl) {
// Failure, just remember this instance for future use.
addToLeaderTable(Num, I, I->getParent());
return false;
+ } else if (Repl == I) {
+ // If I was the result of a shortcut PRE, it might already be in the table
+ // and the best replacement for itself. Nothing to do.
+ return false;
}
// Remove it!
- patchAndReplaceAllUsesWith(I, repl);
- if (MD && repl->getType()->getScalarType()->isPointerTy())
- MD->invalidateCachedPointerInfo(repl);
+ patchAndReplaceAllUsesWith(I, Repl);
+ if (MD && Repl->getType()->getScalarType()->isPointerTy())
+ MD->invalidateCachedPointerInfo(Repl);
markInstructionForDeletion(I);
return true;
}
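// E.g., in the elimination above, if "%b = add i32 %x, %y" receives the
// same value number as a dominating "%a = add i32 %x, %y", findLeader
// returns %a, %b's uses are rewritten to %a, and %b is erased.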
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
+ VN.setAliasAnalysis(&getAnalysis<AAResultsWrapperPass>().getAAResults());
VN.setMemDep(MD);
VN.setDomTree(DT);
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
- BasicBlock *BB = FI++;
+ BasicBlock *BB = &*FI++;
- bool removedBlock = MergeBlockIntoPredecessor(
- BB, DT, /* LoopInfo */ nullptr, VN.getAliasAnalysis(), MD);
+ bool removedBlock =
+ MergeBlockIntoPredecessor(BB, DT, /* LoopInfo */ nullptr, MD);
if (removedBlock) ++NumGVNBlocks;
Changed |= removedBlock;
return Changed;
}
-
bool GVN::processBlock(BasicBlock *BB) {
// FIXME: Kill off InstrsToErase by erasing eagerly in a helper function
// (and incrementing BI before processing an instruction).
if (DeadBlocks.count(BB))
return false;
+ // Clear the map before every BB because its entries are valid only
+ // within a single BB.
+ ReplaceWithConstMap.clear();
bool ChangedFunction = false;
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE;) {
- ChangedFunction |= processInstruction(BI);
+ if (!ReplaceWithConstMap.empty())
+ ChangedFunction |= replaceOperandsWithConsts(&*BI);
+ ChangedFunction |= processInstruction(&*BI);
+
if (InstrsToErase.empty()) {
++BI;
continue;
Value *Op = Instr->getOperand(i);
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
-
+ // This could be a newly inserted instruction, in which case, we won't
+ // find a value number, and should give up before we hurt ourselves.
+ // FIXME: Rewrite the infrastructure to make it easier to value number
+ // and process newly inserted instructions.
+ if (!VN.exists(Op)) {
+ success = false;
+ break;
+ }
if (Value *V = findLeader(Pred, VN.lookup(Op))) {
Instr->setOperand(i, V);
} else {
BasicBlock *CurrentBlock = CurInst->getParent();
predMap.clear();
- for (pred_iterator PI = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock);
- PI != PE; ++PI) {
- BasicBlock *P = *PI;
+ for (BasicBlock *P : predecessors(CurrentBlock)) {
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
// that are not reachable.
// Create a PHI to make the value available in this block.
PHINode *Phi =
PHINode::Create(CurInst->getType(), predMap.size(),
- CurInst->getName() + ".pre-phi", CurrentBlock->begin());
+ CurInst->getName() + ".pre-phi", &CurrentBlock->front());
for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
if (Value *V = predMap[i].first)
Phi->addIncoming(V, predMap[i].second);
addToLeaderTable(ValNo, Phi, CurrentBlock);
Phi->setDebugLoc(CurInst->getDebugLoc());
CurInst->replaceAllUsesWith(Phi);
- if (Phi->getType()->getScalarType()->isPointerTy()) {
- // Because we have added a PHI-use of the pointer value, it has now
- // "escaped" from alias analysis' perspective. We need to inform
- // AA of this.
- for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii) {
- unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
- VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj));
- }
-
- if (MD)
- MD->invalidateCachedPointerInfo(Phi);
- }
+ if (MD && Phi->getType()->getScalarType()->isPointerTy())
+ MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
if (CurrentBlock == &F.getEntryBlock())
continue;
- // Don't perform PRE on a landing pad.
- if (CurrentBlock->isLandingPad())
+ // Don't perform PRE on an EH pad.
+ if (CurrentBlock->isEHPad())
continue;
for (BasicBlock::iterator BI = CurrentBlock->begin(),
BE = CurrentBlock->end();
BI != BE;) {
- Instruction *CurInst = BI++;
- Changed = performScalarPRE(CurInst);
+ Instruction *CurInst = &*BI++;
+ Changed |= performScalarPRE(CurInst);
}
}
/// Split the critical edge connecting the given two blocks, and return
/// the block inserted to the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
- BasicBlock *BB = SplitCriticalEdge(
- Pred, Succ, CriticalEdgeSplittingOptions(getAliasAnalysis(), DT));
+ BasicBlock *BB =
+ SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
if (MD)
MD->invalidateCachedPredecessors();
return BB;
do {
std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
SplitCriticalEdge(Edge.first, Edge.second,
- CriticalEdgeSplittingOptions(getAliasAnalysis(), DT));
+ CriticalEdgeSplittingOptions(DT));
} while (!toSplit.empty());
if (MD) MD->invalidateCachedPredecessors();
return true;
DeadBlocks.insert(Dom.begin(), Dom.end());
// Figure out the dominance-frontier(D).
- for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
- E = Dom.end(); I != E; I++) {
- BasicBlock *B = *I;
- for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) {
- BasicBlock *S = *SI;
+ for (BasicBlock *B : Dom) {
+ for (BasicBlock *S : successors(B)) {
if (DeadBlocks.count(S))
continue;
bool AllPredDead = true;
- for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++)
- if (!DeadBlocks.count(*PI)) {
+ for (BasicBlock *P : predecessors(S))
+ if (!DeadBlocks.count(P)) {
AllPredDead = false;
break;
}
continue;
SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
- for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(),
- PE = Preds.end(); PI != PE; PI++) {
- BasicBlock *P = *PI;
-
+ for (BasicBlock *P : Preds) {
if (!DeadBlocks.count(P))
continue;
// R be the target of the dead outgoing edge.
// 1) Identify the set of dead blocks implied by the branch's dead outgoing
// edge. The result of this step will be {X | X is dominated by R}.
-// 2) Identify those blocks which haves at least one dead prodecessor. The
+// 2) Identify those blocks which have at least one dead predecessor. The
// result of this step will be dominance-frontier(R).
// 3) Update the PHIs in DF(R) by replacing the operands corresponding to
// dead blocks with "UndefVal" in the hope these PHIs will be optimized away.
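// For example, once "br i1 true, label %L, label %D" is recognized, every
// block dominated by %D joins DeadBlocks, and each PHI in DF(%D) takes
// UndefVal for incoming values from dead predecessors.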
if (!BI || BI->isUnconditional())
return false;
+ // If a branch has two identical successors, we cannot declare either dead.
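+  // (E.g., "br i1 %c, label %bb, label %bb" reaches %bb either way.)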
+ if (BI->getSuccessor(0) == BI->getSuccessor(1))
+ return false;
+
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
if (!Cond)
return false;
// instructions, it makes more sense just to "fabricate" a val-number for the
// dead code than to check whether each instruction involved is dead.
void GVN::assignValNumForDeadCode() {
- for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
- E = DeadBlocks.end(); I != E; I++) {
- BasicBlock *BB = *I;
- for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
- II != EE; II++) {
- Instruction *Inst = &*II;
- unsigned ValNum = VN.lookup_or_add(Inst);
- addToLeaderTable(ValNum, Inst, BB);
+ for (BasicBlock *BB : DeadBlocks) {
+ for (Instruction &Inst : *BB) {
+ unsigned ValNum = VN.lookup_or_add(&Inst);
+ addToLeaderTable(ValNum, &Inst, BB);
}
}
}