//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Pass.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DataLayout.h"
-#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "dse"
+
+STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
const TargetLibraryInfo *TLI;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
+ DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
initializeDSEPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F) {
- AA = &getAnalysis<AliasAnalysis>();
+ bool runOnFunction(Function &F) override {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DT = &getAnalysis<DominatorTree>();
- TLI = AA->getTargetLibraryInfo();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
bool Changed = false;
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
+ for (BasicBlock &I : F)
// Only check non-dead blocks. Dead blocks may have strange pointer
// cycles that will confuse alias analysis.
- if (DT->isReachableFromEntry(I))
- Changed |= runOnBasicBlock(*I);
+ if (DT->isReachableFromEntry(&I))
+ Changed |= runOnBasicBlock(I);
- AA = 0; MD = 0; DT = 0;
+ AA = nullptr; MD = nullptr; DT = nullptr;
return Changed;
}
bool runOnBasicBlock(BasicBlock &BB);
+ bool MemoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI);
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
- void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallSetVector<Value*, 16> &DeadStackObjects);
+ void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
+ SmallSetVector<Value *, 16> &DeadStackObjects,
+ const DataLayout &DL);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<DominatorTree>();
- AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<MemoryDependenceAnalysis>();
- AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<DominatorTree>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
};
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
- MemoryDependenceAnalysis &MD,
- const TargetLibraryInfo *TLI,
- SmallSetVector<Value*, 16> *ValueSet = 0) {
+ MemoryDependenceAnalysis &MD,
+ const TargetLibraryInfo &TLI,
+ SmallSetVector<Value*, 16> *ValueSet = nullptr) {
SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I);
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
+ DeadInst->setOperand(op, nullptr);
// If this operand just became dead, add it to the NowDeadInsts list.
if (!Op->use_empty()) continue;
if (Instruction *OpI = dyn_cast<Instruction>(Op))
- if (isInstructionTriviallyDead(OpI, TLI))
+ if (isInstructionTriviallyDead(OpI, &TLI))
NowDeadInsts.push_back(OpI);
}
/// hasMemoryWrite - Does this instruction write some memory? This only returns
/// true for things that we can analyze with other helpers below.
-static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
+static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
if (isa<StoreInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
return true;
}
}
- if (CallSite CS = I) {
+ if (auto CS = CallSite(I)) {
if (Function *F = CS.getCalledFunction()) {
- if (TLI && TLI->has(LibFunc::strcpy) &&
- F->getName() == TLI->getName(LibFunc::strcpy)) {
+ if (TLI.has(LibFunc::strcpy) &&
+ F->getName() == TLI.getName(LibFunc::strcpy)) {
return true;
}
- if (TLI && TLI->has(LibFunc::strncpy) &&
- F->getName() == TLI->getName(LibFunc::strncpy)) {
+ if (TLI.has(LibFunc::strncpy) &&
+ F->getName() == TLI.getName(LibFunc::strncpy)) {
return true;
}
- if (TLI && TLI->has(LibFunc::strcat) &&
- F->getName() == TLI->getName(LibFunc::strcat)) {
+ if (TLI.has(LibFunc::strcat) &&
+ F->getName() == TLI.getName(LibFunc::strcat)) {
return true;
}
- if (TLI && TLI->has(LibFunc::strncat) &&
- F->getName() == TLI->getName(LibFunc::strncat)) {
+ if (TLI.has(LibFunc::strncat) &&
+ F->getName() == TLI.getName(LibFunc::strncat)) {
return true;
}
}
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
-static AliasAnalysis::Location
-getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
- return AA.getLocation(SI);
+ return MemoryLocation::get(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
- AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
- // If we don't have target data around, an unknown size in Location means
- // that we should use the size of the pointee type. This isn't valid for
- // memset/memcpy, which writes more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
- return AliasAnalysis::Location();
+ MemoryLocation Loc = MemoryLocation::getForDest(MI);
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (II == 0) return AliasAnalysis::Location();
+ if (!II)
+ return MemoryLocation();
switch (II->getIntrinsicID()) {
- default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+ default:
+ return MemoryLocation(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
- // If we don't have target data around, an unknown size in Location means
- // that we should use the size of the pointee type. This isn't valid for
- // init.trampoline, which writes more than an i8.
- if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
-
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
- return AliasAnalysis::Location(II->getArgOperand(0));
+ return MemoryLocation(II->getArgOperand(0));
case Intrinsic::lifetime_end: {
uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- return AliasAnalysis::Location(II->getArgOperand(1), Len);
+ return MemoryLocation(II->getArgOperand(1), Len);
}
}
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
-static AliasAnalysis::Location
-getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
-  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
-         "Unknown instruction case");
+static MemoryLocation getLocForRead(Instruction *Inst,
+                                    const TargetLibraryInfo &TLI) {
+  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");
// The only instructions that both read and write are the mem transfer
// instructions (memcpy/memmove).
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
-    return AA.getLocationForSource(MTI);
-  return AliasAnalysis::Location();
+    return MemoryLocation::getForSource(MTI);
+  // All other memory-writing instructions are treated as not reading any
+  // location we can track; callers test Ptr for null to detect this.
+  return MemoryLocation();
}
}
}
- if (CallSite CS = I)
+ if (auto CS = CallSite(I))
return CS.getInstruction()->use_empty();
return false;
}
}
- CallSite CS = I;
+ CallSite CS(I);
// All the supported functions so far happen to have dest as their first
// argument.
return CS.getArgument(0);
}
-static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
+/// getPointerSize - Return the size in bytes of the object V points to, as
+/// reported by getObjectSize, or MemoryLocation::UnknownSize if the size
+/// cannot be determined.
+static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
+                               const TargetLibraryInfo &TLI) {
uint64_t Size;
-  if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
+  if (getObjectSize(V, Size, DL, &TLI))
return Size;
-  return AliasAnalysis::UnknownSize;
+  return MemoryLocation::UnknownSize;
}
namespace {
/// completely overwrites a store to the 'Earlier' location.
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
-static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
- const AliasAnalysis::Location &Earlier,
- AliasAnalysis &AA,
- int64_t &EarlierOff,
- int64_t &LaterOff) {
+static OverwriteResult isOverwrite(const MemoryLocation &Later,
+ const MemoryLocation &Earlier,
+ const DataLayout &DL,
+ const TargetLibraryInfo &TLI,
+ int64_t &EarlierOff, int64_t &LaterOff) {
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
if (P1 == P2) {
// If we don't know the sizes of either access, then we can't do a
// comparison.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize) {
- // If we have no DataLayout information around, then the size of the store
- // is inferrable from the pointee type. If they are the same type, then
- // we know that the store is safe.
- if (AA.getDataLayout() == 0 &&
- Later.Ptr->getType() == Earlier.Ptr->getType())
- return OverwriteComplete;
-
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
- }
// Make sure that the Later size is >= the Earlier size.
if (Later.Size >= Earlier.Size)
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize ||
- AA.getDataLayout() == 0)
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
- // an alloca, or a byval argument). If so, then it clearly overwrites any
- // other store to the same object.
- const DataLayout &TD = *AA.getDataLayout();
-
- const Value *UO1 = GetUnderlyingObject(P1, &TD),
- *UO2 = GetUnderlyingObject(P2, &TD);
+ // an alloca, or a byval/inalloca argument). If so, then it clearly
+ // overwrites any other store to the same object.
+ const Value *UO1 = GetUnderlyingObject(P1, DL),
+ *UO2 = GetUnderlyingObject(P2, DL);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
return OverwriteUnknown;
// If the "Later" store is to a recognizable object, get its size.
- uint64_t ObjectSize = getPointerSize(UO2, AA);
- if (ObjectSize != AliasAnalysis::UnknownSize)
+ uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
+ if (ObjectSize != MemoryLocation::UnknownSize)
if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
return OverwriteComplete;
// pointers are equal, then we can reason about the two stores.
EarlierOff = 0;
LaterOff = 0;
- const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
- const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
+ const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
+ const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
// If the base pointers still differ, we have two completely different stores.
if (BP1 != BP2)
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
- const AliasAnalysis::Location &InstStoreLoc,
- Instruction *DepWrite, AliasAnalysis &AA) {
+ const MemoryLocation &InstStoreLoc,
+ Instruction *DepWrite,
+ const TargetLibraryInfo &TLI,
+ AliasAnalysis &AA) {
// Self reads can only happen for instructions that read memory. Get the
// location read.
- AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
- if (InstReadLoc.Ptr == 0) return false; // Not a reading instruction.
+ MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
+ if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
// Here we don't know if A/B may alias, but we do know that B/B are must
// aliases, so removing the first memcpy is safe (assuming it writes <= #
// bytes as the second one.
- AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+ MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
return false;
//===----------------------------------------------------------------------===//
bool DSE::runOnBasicBlock(BasicBlock &BB) {
+ const DataLayout &DL = BB.getModule()->getDataLayout();
bool MadeChange = false;
// Do a top-down walk on the BB.
for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
- Instruction *Inst = BBI++;
+ Instruction *Inst = &*BBI++;
// Handle 'free' calls specially.
if (CallInst *F = isFreeCall(Inst, TLI)) {
}
// If we find something that writes memory, get its memory dependence.
- if (!hasMemoryWrite(Inst, TLI))
- continue;
-
- MemDepResult InstDep = MD->getDependency(Inst);
-
- // Ignore any store where we can't find a local dependence.
- // FIXME: cross-block DSE would be fun. :)
- if (!InstDep.isDef() && !InstDep.isClobber())
+ if (!hasMemoryWrite(Inst, *TLI))
continue;
// If we're storing the same value back to a pointer that we just
// loaded from, then the store can be removed.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
+
+ auto RemoveDeadInstAndUpdateBBI = [&](Instruction *DeadInst) {
+ // DeleteDeadInstruction can delete the current instruction. Save BBI
+ // in case we need it.
+ WeakVH NextInst(&*BBI);
+
+ DeleteDeadInstruction(DeadInst, *MD, *TLI);
+
+ if (!NextInst) // Next instruction deleted.
+ BBI = BB.begin();
+ else if (BBI != BB.begin()) // Revisit this instruction if possible.
+ --BBI;
+ ++NumRedundantStores;
+ MadeChange = true;
+ };
+
+ if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
- SI->getOperand(0) == DepLoad && isRemovable(SI)) {
+ isRemovable(SI) &&
+ MemoryIsNotModifiedBetween(DepLoad, SI)) {
+
DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n "
<< "LOAD: " << *DepLoad << "\n STORE: " << *SI << '\n');
- // DeleteDeadInstruction can delete the current instruction. Save BBI
- // in case we need it.
- WeakVH NextInst(BBI);
+ RemoveDeadInstAndUpdateBBI(SI);
+ continue;
+ }
+ }
- DeleteDeadInstruction(SI, *MD, TLI);
+ // Remove null stores into the calloc'ed objects
+ Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
- if (NextInst == 0) // Next instruction deleted.
- BBI = BB.begin();
- else if (BBI != BB.begin()) // Revisit this instruction if possible.
- --BBI;
- ++NumFastStores;
- MadeChange = true;
+ if (StoredConstant && StoredConstant->isNullValue() &&
+ isRemovable(SI)) {
+ Instruction *UnderlyingPointer = dyn_cast<Instruction>(
+ GetUnderlyingObject(SI->getPointerOperand(), DL));
+
+ if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
+ MemoryIsNotModifiedBetween(UnderlyingPointer, SI)) {
+ DEBUG(dbgs()
+ << "DSE: Remove null store to the calloc'ed object:\n DEAD: "
+ << *Inst << "\n OBJECT: " << *UnderlyingPointer << '\n');
+
+ RemoveDeadInstAndUpdateBBI(SI);
continue;
}
}
}
+ MemDepResult InstDep = MD->getDependency(Inst);
+
+ // Ignore any store where we can't find a local dependence.
+ // FIXME: cross-block DSE would be fun. :)
+ if (!InstDep.isDef() && !InstDep.isClobber())
+ continue;
+
// Figure out what location is being stored to.
- AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+ MemoryLocation Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
- if (Loc.Ptr == 0)
+ if (!Loc.Ptr)
continue;
while (InstDep.isDef() || InstDep.isClobber()) {
//
// Find out what memory location the dependent instruction stores.
Instruction *DepWrite = InstDep.getInst();
- AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+ MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it isn't a size, bail out.
- if (DepLoc.Ptr == 0)
+ if (!DepLoc.Ptr)
break;
// If we find a write that is a) removable (i.e., non-volatile), b) is
// completely obliterated by the store to 'Loc', and c) which we know that
// 'Inst' doesn't load from, then we can remove it.
if (isRemovable(DepWrite) &&
- !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
+ !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
int64_t InstWriteOffset, DepWriteOffset;
- OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
- DepWriteOffset, InstWriteOffset);
+ OverwriteResult OR =
+ isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
if (OR == OverwriteComplete) {
DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
// Delete the store and now-dead instructions that feed it.
- DeleteDeadInstruction(DepWrite, *MD, TLI);
+ DeleteDeadInstruction(DepWrite, *MD, *TLI);
++NumFastStores;
MadeChange = true;
// DeleteDeadInstruction can delete the current instruction in loop
// cases, reset BBI.
- BBI = Inst;
+ BBI = Inst->getIterator();
if (BBI != BB.begin())
--BBI;
break;
if (DepWrite == &BB.front()) break;
// Can't look past this instruction if it might read 'Loc'.
- if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
+ if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
break;
- InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
+ InstDep = MD->getPointerDependencyFrom(Loc, false,
+ DepWrite->getIterator(), &BB);
}
}
return MadeChange;
}
+/// Returns true if the memory which is accessed by the second instruction is not
+/// modified between the first and the second instruction.
+/// Precondition: Second instruction must be dominated by the first
+/// instruction.
+bool DSE::MemoryIsNotModifiedBetween(Instruction *FirstI,
+                                     Instruction *SecondI) {
+  SmallVector<BasicBlock *, 16> WorkList;
+  SmallPtrSet<BasicBlock *, 8> Visited;
+  // Begin scanning at the instruction immediately after FirstI; FirstI itself
+  // is exempt from the modification check.
+  BasicBlock::iterator FirstBBI(FirstI);
+  ++FirstBBI;
+  BasicBlock::iterator SecondBBI(SecondI);
+  BasicBlock *FirstBB = FirstI->getParent();
+  BasicBlock *SecondBB = SecondI->getParent();
+  // The location that must remain unmodified is the one SecondI accesses.
+  MemoryLocation MemLoc = MemoryLocation::get(SecondI);
+
+  // Start checking the block containing SecondI (the store-block).
+  WorkList.push_back(SecondBB);
+  bool isFirstBlock = true;
+
+  // Check all blocks going backward until we reach FirstI's block.
+  while (!WorkList.empty()) {
+    BasicBlock *B = WorkList.pop_back_val();
+
+    // Ignore instructions before FirstI if this is FirstBB.
+    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
+
+    BasicBlock::iterator EI;
+    if (isFirstBlock) {
+      // Ignore instructions after SecondI if this is the first visit of
+      // SecondBB.
+      assert(B == SecondBB && "first block is not the store block");
+      EI = SecondBBI;
+      isFirstBlock = false;
+    } else {
+      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
+      // In this case we also have to look at instructions after SecondI.
+      EI = B->end();
+    }
+    for (; BI != EI; ++BI) {
+      Instruction *I = &*BI;
+      // SecondI itself is excluded: it can be revisited here when SecondBB is
+      // reached again through a loop back edge (the EI == B->end() case).
+      if (I->mayWriteToMemory() && I != SecondI) {
+        auto Res = AA->getModRefInfo(I, MemLoc);
+        if (Res != MRI_NoModRef)
+          return false;
+      }
+    }
+    if (B != FirstBB) {
+      assert(B != &FirstBB->getParent()->getEntryBlock() &&
+          "Should not hit the entry block because SI must be dominated by LI");
+      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
+        // Visited guarantees each predecessor block is scanned at most once.
+        if (!Visited.insert(*PredI).second)
+          continue;
+        WorkList.push_back(*PredI);
+      }
+    }
+  }
+  return true;
+}
+
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to F.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
bool DSE::HandleFree(CallInst *F) {
bool MadeChange = false;
- AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+ MemoryLocation Loc = MemoryLocation(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
+ const DataLayout &DL = F->getModule()->getDataLayout();
while (!Blocks.empty()) {
BasicBlock *BB = Blocks.pop_back_val();
Instruction *InstPt = BB->getTerminator();
if (BB == F->getParent()) InstPt = F;
- MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
+ MemDepResult Dep =
+ MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
while (Dep.isDef() || Dep.isClobber()) {
Instruction *Dependency = Dep.getInst();
- if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
+ if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
break;
Value *DepPointer =
- GetUnderlyingObject(getStoredPointerOperand(Dependency));
+ GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
break;
- Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+ auto Next = ++Dependency->getIterator();
// DCE instructions only used to calculate that store
- DeleteDeadInstruction(Dependency, *MD, TLI);
+ DeleteDeadInstruction(Dependency, *MD, *TLI);
++NumFastStores;
MadeChange = true;
return MadeChange;
}
-namespace {
- struct CouldRef {
- typedef Value *argument_type;
- const CallSite CS;
- AliasAnalysis *AA;
-
- bool operator()(Value *I) {
- // See if the call site touches the value.
- AliasAnalysis::ModRefResult A =
- AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
-
- return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
- }
- };
-}
-
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block. Ex:
/// %A = alloca i32
SmallSetVector<Value*, 16> DeadStackObjects;
// Find all of the alloca'd pointers in the entry block.
- BasicBlock *Entry = BB.getParent()->begin();
- for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
- if (isa<AllocaInst>(I))
- DeadStackObjects.insert(I);
+ BasicBlock &Entry = BB.getParent()->front();
+ for (Instruction &I : Entry) {
+ if (isa<AllocaInst>(&I))
+ DeadStackObjects.insert(&I);
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
- else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
- DeadStackObjects.insert(I);
+ else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
+ DeadStackObjects.insert(&I);
}
- // Treat byval arguments the same, stores to them are dead at the end of the
- // function.
- for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
- AE = BB.getParent()->arg_end(); AI != AE; ++AI)
- if (AI->hasByValAttr())
- DeadStackObjects.insert(AI);
+ // Treat byval or inalloca arguments the same, stores to them are dead at the
+ // end of the function.
+ for (Argument &AI : BB.getParent()->args())
+ if (AI.hasByValOrInAllocaAttr())
+ DeadStackObjects.insert(&AI);
+
+ const DataLayout &DL = BB.getModule()->getDataLayout();
// Scan the basic block backwards
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
--BBI;
// If we find a store, check to see if it points into a dead stack value.
- if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
+ if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
- GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
+ GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
}
if (AllDead) {
- Instruction *Dead = BBI++;
+ Instruction *Dead = &*BBI++;
DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
<< *Dead << "\n Objects: ";
for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
E = Pointers.end(); I != E; ++I) {
dbgs() << **I;
- if (llvm::next(I) != E)
+ if (std::next(I) != E)
dbgs() << ", ";
}
dbgs() << '\n');
// DCE instructions only used to calculate that store.
- DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
+ DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
++NumFastStores;
MadeChange = true;
continue;
}
// Remove any dead non-memory-mutating instructions.
- if (isInstructionTriviallyDead(BBI, TLI)) {
- Instruction *Inst = BBI++;
- DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
+ if (isInstructionTriviallyDead(&*BBI, TLI)) {
+ Instruction *Inst = &*BBI++;
+ DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
++NumFastOther;
MadeChange = true;
continue;
if (isa<AllocaInst>(BBI)) {
// Remove allocas from the list of dead stack objects; there can't be
// any references before the definition.
- DeadStackObjects.remove(BBI);
+ DeadStackObjects.remove(&*BBI);
continue;
}
- if (CallSite CS = cast<Value>(BBI)) {
+ if (auto CS = CallSite(&*BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
- if (isAllocLikeFn(BBI, TLI))
- DeadStackObjects.remove(BBI);
+ if (isAllocLikeFn(&*BBI, TLI))
+ DeadStackObjects.remove(&*BBI);
// If this call does not access memory, it can't be loading any of our
// pointers.
// If the call might load from any of our allocas, then any store above
// the call is live.
- CouldRef Pred = { CS, AA };
- DeadStackObjects.remove_if(Pred);
+ DeadStackObjects.remove_if([&](Value *I) {
+ // See if the call site touches the value.
+ ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
+
+ return A == MRI_ModRef || A == MRI_Ref;
+ });
// If all of the allocas were clobbered by the call then we're not going
// to find anything else to process.
continue;
}
- AliasAnalysis::Location LoadedLoc;
+ MemoryLocation LoadedLoc;
// If we encounter a use of the pointer, it is no longer considered dead
if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
if (!L->isUnordered()) // Be conservative with atomic/volatile load
break;
- LoadedLoc = AA->getLocation(L);
+ LoadedLoc = MemoryLocation::get(L);
} else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
- LoadedLoc = AA->getLocation(V);
+ LoadedLoc = MemoryLocation::get(V);
} else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
- LoadedLoc = AA->getLocationForSource(MTI);
+ LoadedLoc = MemoryLocation::getForSource(MTI);
} else if (!BBI->mayReadFromMemory()) {
// Instruction doesn't read memory. Note that stores that weren't removed
// above will hit this case.
// Remove any allocas from the DeadPointer set that are loaded, as this
// makes any stores above the access live.
- RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
+ RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);
// If all of the allocas were clobbered by the access then we're not going
// to find anything else to process.
return MadeChange;
}
-namespace {
- struct CouldAlias {
- typedef Value *argument_type;
- const AliasAnalysis::Location &LoadedLoc;
- AliasAnalysis *AA;
-
- bool operator()(Value *I) {
- // See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
- return !AA->isNoAlias(StackLoc, LoadedLoc);
- }
- };
-}
-
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
-void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallSetVector<Value*, 16> &DeadStackObjects) {
- const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
+void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
+ SmallSetVector<Value *, 16> &DeadStackObjects,
+ const DataLayout &DL) {
+ const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
}
// Remove objects that could alias LoadedLoc.
- CouldAlias Pred = { LoadedLoc, AA };
- DeadStackObjects.remove_if(Pred);
+ DeadStackObjects.remove_if([&](Value *I) {
+ // See if the loaded location could alias the stack location.
+ MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
+ return !AA->isNoAlias(StackLoc, LoadedLoc);
+ });
}