X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FDeadStoreElimination.cpp;h=f157cbfe8bd758c6df2961e04b09a94cd3c7a7ec;hp=803564182506534a6af677742c5441fa30849608;hb=609d95290a4d913139ba0b46f5b5b982dc4b1d96;hpb=97a1a6144721c2eec2323ff85a80f77b36b58c96

diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 80356418250..f157cbfe8bd 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -15,28 +15,31 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "dse"
 #include "llvm/Transforms/Scalar.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Pass.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
 
+#define DEBUG_TYPE "dse"
+
 STATISTIC(NumFastStores, "Number of stores deleted");
 STATISTIC(NumFastOther , "Number of other instrs removed");
 
@@ -48,15 +51,18 @@ namespace {
     const TargetLibraryInfo *TLI;
 
     static char ID; // Pass identification, replacement for typeid
-    DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
+    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
       initializeDSEPass(*PassRegistry::getPassRegistry());
     }
 
-    virtual bool runOnFunction(Function &F) {
+    bool runOnFunction(Function &F) override {
+      if (skipOptnoneFunction(F))
+        return false;
+
       AA = &getAnalysis<AliasAnalysis>();
       MD = &getAnalysis<MemoryDependenceAnalysis>();
-      DT = &getAnalysis<DominatorTree>();
-      TLI = AA->getTargetLibraryInfo();
+      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
 
       bool Changed = false;
       for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
@@ -65,23 +71,25 @@ namespace {
         if (DT->isReachableFromEntry(I))
           Changed |= runOnBasicBlock(*I);
 
-      AA = 0; MD = 0; DT = 0;
+      AA = nullptr; MD = nullptr; DT = nullptr;
       return Changed;
     }
 
     bool runOnBasicBlock(BasicBlock &BB);
     bool HandleFree(CallInst *F);
     bool handleEndBlock(BasicBlock &BB);
-    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
-                               SmallSetVector<Value *, 16> &DeadStackObjects);
+    void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
+                               SmallSetVector<Value *, 16> &DeadStackObjects,
+                               const DataLayout &DL);
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.setPreservesCFG();
-      AU.addRequired<DominatorTree>();
+      AU.addRequired<DominatorTreeWrapperPass>();
       AU.addRequired<AliasAnalysis>();
       AU.addRequired<MemoryDependenceAnalysis>();
+      AU.addRequired<TargetLibraryInfoWrapperPass>();
       AU.addPreserved<AliasAnalysis>();
-      AU.addPreserved<DominatorTree>();
+      AU.addPreserved<DominatorTreeWrapperPass>();
       AU.addPreserved<MemoryDependenceAnalysis>();
     }
 };
@@ -89,9 +97,10 @@ namespace {
 char DSE::ID = 0;
 INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
 
 FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
@@ -107,9 +116,9 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
 /// If ValueSet is non-null, remove any deleted instructions from it as well.
 ///
 static void DeleteDeadInstruction(Instruction *I,
-                               MemoryDependenceAnalysis &MD,
-                               const TargetLibraryInfo *TLI,
-                               SmallSetVector<Value *, 16> *ValueSet = 0) {
+                                  MemoryDependenceAnalysis &MD,
+                                  const TargetLibraryInfo &TLI,
+                                  SmallSetVector<Value *, 16> *ValueSet = nullptr) {
   SmallVector<Instruction *, 32> NowDeadInsts;
 
   NowDeadInsts.push_back(I);
@@ -127,13 +136,13 @@ static void DeleteDeadInstruction(Instruction *I,
     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
       Value *Op = DeadInst->getOperand(op);
-      DeadInst->setOperand(op, 0);
+      DeadInst->setOperand(op, nullptr);
 
       // If this operand just became dead, add it to the NowDeadInsts list.
       if (!Op->use_empty()) continue;
 
       if (Instruction *OpI = dyn_cast<Instruction>(Op))
-        if (isInstructionTriviallyDead(OpI, TLI))
+        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
     }
 
@@ -146,7 +155,7 @@ static void DeleteDeadInstruction(Instruction *I,
 
 /// hasMemoryWrite - Does this instruction write some memory?  This only returns
 /// true for things that we can analyze with other helpers below.
-static bool hasMemoryWrite(Instruction *I) {
+static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
   if (isa<StoreInst>(I))
     return true;
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
@@ -161,60 +170,71 @@
       return true;
     }
   }
+  if (auto CS = CallSite(I)) {
+    if (Function *F = CS.getCalledFunction()) {
+      if (TLI.has(LibFunc::strcpy) &&
+          F->getName() == TLI.getName(LibFunc::strcpy)) {
+        return true;
+      }
+      if (TLI.has(LibFunc::strncpy) &&
+          F->getName() == TLI.getName(LibFunc::strncpy)) {
+        return true;
+      }
+      if (TLI.has(LibFunc::strcat) &&
+          F->getName() == TLI.getName(LibFunc::strcat)) {
+        return true;
+      }
+      if (TLI.has(LibFunc::strncat) &&
+          F->getName() == TLI.getName(LibFunc::strncat)) {
+        return true;
+      }
+    }
+  }
   return false;
 }
 
 /// getLocForWrite - Return a Location stored to by the specified instruction.
 /// If isRemovable returns true, this function and getLocForRead completely
 /// describe the memory operations for this instruction.
-static AliasAnalysis::Location
-getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
-    return AA.getLocation(SI);
+    return MemoryLocation::get(SI);
 
   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
     // memcpy/memmove/memset.
-    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
-    // If we don't have target data around, an unknown size in Location means
-    // that we should use the size of the pointee type.  This isn't valid for
-    // memset/memcpy, which writes more than an i8.
-    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
-      return AliasAnalysis::Location();
+    MemoryLocation Loc = MemoryLocation::getForDest(MI);
     return Loc;
   }
 
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
-  if (II == 0) return AliasAnalysis::Location();
+  if (!II)
+    return MemoryLocation();
 
   switch (II->getIntrinsicID()) {
-  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+  default:
+    return MemoryLocation(); // Unhandled intrinsic.
   case Intrinsic::init_trampoline:
-    // If we don't have target data around, an unknown size in Location means
-    // that we should use the size of the pointee type.  This isn't valid for
-    // init.trampoline, which writes more than an i8.
-    if (AA.getTargetData() == 0) return AliasAnalysis::Location();
-
     // FIXME: We don't know the size of the trampoline, so we can't really
     // handle it here.
-    return AliasAnalysis::Location(II->getArgOperand(0));
+    return MemoryLocation(II->getArgOperand(0));
   case Intrinsic::lifetime_end: {
     uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
-    return AliasAnalysis::Location(II->getArgOperand(1), Len);
+    return MemoryLocation(II->getArgOperand(1), Len);
   }
   }
 }
 
 /// getLocForRead - Return the location read by the specified "hasMemoryWrite"
 /// instruction if any.
-static AliasAnalysis::Location
-getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
-  assert(hasMemoryWrite(Inst) && "Unknown instruction case");
+static MemoryLocation getLocForRead(Instruction *Inst,
+                                    const TargetLibraryInfo &TLI) {
+  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");
 
   // The only instructions that both read and write are the mem transfer
   // instructions (memcpy/memmove).
   if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
-    return AA.getLocationForSource(MTI);
-  return AliasAnalysis::Location();
+    return MemoryLocation::getForSource(MTI);
+  return MemoryLocation();
 }
 
@@ -225,23 +245,29 @@ static bool isRemovable(Instruction *I) {
   if (StoreInst *SI = dyn_cast<StoreInst>(I))
     return SI->isUnordered();
 
-  IntrinsicInst *II = cast<IntrinsicInst>(I);
-  switch (II->getIntrinsicID()) {
-  default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
-  case Intrinsic::lifetime_end:
-    // Never remove dead lifetime_end's, e.g. because it is followed by a
-    // free.
-    return false;
-  case Intrinsic::init_trampoline:
-    // Always safe to remove init_trampoline.
-    return true;
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
+    case Intrinsic::lifetime_end:
+      // Never remove dead lifetime_end's, e.g. because it is followed by a
+      // free.
+      return false;
+    case Intrinsic::init_trampoline:
+      // Always safe to remove init_trampoline.
+      return true;
 
-  case Intrinsic::memset:
-  case Intrinsic::memmove:
-  case Intrinsic::memcpy:
-    // Don't remove volatile memory intrinsics.
-    return !cast<MemIntrinsic>(II)->isVolatile();
+    case Intrinsic::memset:
+    case Intrinsic::memmove:
+    case Intrinsic::memcpy:
+      // Don't remove volatile memory intrinsics.
+      return !cast<MemIntrinsic>(II)->isVolatile();
+    }
   }
+
+  if (auto CS = CallSite(I))
+    return CS.getInstruction()->use_empty();
+
+  return false;
 }
 
@@ -252,14 +278,19 @@ static bool isShortenable(Instruction *I) {
   if (isa<StoreInst>(I))
     return false;
 
-  IntrinsicInst *II = cast<IntrinsicInst>(I);
-  switch (II->getIntrinsicID()) {
-    default: return false;
-    case Intrinsic::memset:
-    case Intrinsic::memcpy:
-      // Do shorten memory intrinsics.
-      return true;
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+      default: return false;
+      case Intrinsic::memset:
+      case Intrinsic::memcpy:
+        // Do shorten memory intrinsics.
+        return true;
+    }
   }
+
+  // Don't shorten libcalls calls for now.
+
+  return false;
 }
 
 /// getStoredPointerOperand - Return the pointer that is being written to.
@@ -269,19 +300,26 @@ static Value *getStoredPointerOperand(Instruction *I) {
   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
     return MI->getDest();
 
-  IntrinsicInst *II = cast<IntrinsicInst>(I);
-  switch (II->getIntrinsicID()) {
-  default: llvm_unreachable("Unexpected intrinsic!");
-  case Intrinsic::init_trampoline:
-    return II->getArgOperand(0);
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+    default: llvm_unreachable("Unexpected intrinsic!");
+    case Intrinsic::init_trampoline:
+      return II->getArgOperand(0);
+    }
   }
+
+  CallSite CS(I);
+  // All the supported functions so far happen to have dest as their first
+  // argument.
+  return CS.getArgument(0);
 }
 
-static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
+static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
+                               const TargetLibraryInfo &TLI) {
   uint64_t Size;
-  if (getObjectSize(V, Size, AA.getTargetData(), AA.getTargetLibraryInfo()))
+  if (getObjectSize(V, Size, DL, &TLI))
     return Size;
-  return AliasAnalysis::UnknownSize;
+  return MemoryLocation::UnknownSize;
 }
 
 namespace {
@@ -297,11 +335,11 @@ namespace {
 /// completely overwrites a store to the 'Earlier' location.
 /// 'OverwriteEnd' if the end of the 'Earlier' location is completely
 /// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
-static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
-                                   const AliasAnalysis::Location &Earlier,
-                                   AliasAnalysis &AA,
-                                   int64_t &EarlierOff,
-                                   int64_t &LaterOff) {
+static OverwriteResult isOverwrite(const MemoryLocation &Later,
+                                   const MemoryLocation &Earlier,
+                                   const DataLayout &DL,
+                                   const TargetLibraryInfo &TLI,
+                                   int64_t &EarlierOff, int64_t &LaterOff) {
   const Value *P1 = Earlier.Ptr->stripPointerCasts();
   const Value *P2 = Later.Ptr->stripPointerCasts();
 
@@ -310,17 +348,9 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
   if (P1 == P2) {
     // If we don't know the sizes of either access, then we can't do a
     // comparison.
-    if (Later.Size == AliasAnalysis::UnknownSize ||
-        Earlier.Size == AliasAnalysis::UnknownSize) {
-      // If we have no TargetData information around, then the size of the store
-      // is inferrable from the pointee type.  If they are the same type, then
-      // we know that the store is safe.
-      if (AA.getTargetData() == 0 &&
-          Later.Ptr->getType() == Earlier.Ptr->getType())
-        return OverwriteComplete;
-
+    if (Later.Size == MemoryLocation::UnknownSize ||
+        Earlier.Size == MemoryLocation::UnknownSize)
       return OverwriteUnknown;
-    }
 
     // Make sure that the Later size is >= the Earlier size.
     if (Later.Size >= Earlier.Size)
@@ -329,18 +359,15 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
 
   // Otherwise, we have to have size information, and the later store has to be
   // larger than the earlier one.
-  if (Later.Size == AliasAnalysis::UnknownSize ||
-      Earlier.Size == AliasAnalysis::UnknownSize ||
-      AA.getTargetData() == 0)
+  if (Later.Size == MemoryLocation::UnknownSize ||
+      Earlier.Size == MemoryLocation::UnknownSize)
     return OverwriteUnknown;
 
   // Check to see if the later store is to the entire object (either a global,
-  // an alloca, or a byval argument).  If so, then it clearly overwrites any
-  // other store to the same object.
-  const TargetData &TD = *AA.getTargetData();
-
-  const Value *UO1 = GetUnderlyingObject(P1, &TD),
-              *UO2 = GetUnderlyingObject(P2, &TD);
+  // an alloca, or a byval/inalloca argument).  If so, then it clearly
+  // overwrites any other store to the same object.
+  const Value *UO1 = GetUnderlyingObject(P1, DL),
+              *UO2 = GetUnderlyingObject(P2, DL);
 
   // If we can't resolve the same pointers to the same object, then we can't
   // analyze them at all.
@@ -348,8 +375,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
     return OverwriteUnknown;
 
   // If the "Later" store is to a recognizable object, get its size.
-  uint64_t ObjectSize = getPointerSize(UO2, AA);
-  if (ObjectSize != AliasAnalysis::UnknownSize)
+  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
+  if (ObjectSize != MemoryLocation::UnknownSize)
     if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
       return OverwriteComplete;
 
@@ -358,8 +385,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
   // pointers are equal, then we can reason about the two stores.
   EarlierOff = 0;
   LaterOff = 0;
-  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
-  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
+  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
+  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
 
   // If the base pointers still differ, we have two completely different stores.
   if (BP1 != BP2)
@@ -416,12 +443,14 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
 /// This function detects when it is unsafe to remove a dependent instruction
 /// because the DSE inducing instruction may be a self-read.
 static bool isPossibleSelfRead(Instruction *Inst,
-                               const AliasAnalysis::Location &InstStoreLoc,
-                               Instruction *DepWrite, AliasAnalysis &AA) {
+                               const MemoryLocation &InstStoreLoc,
+                               Instruction *DepWrite,
+                               const TargetLibraryInfo &TLI,
+                               AliasAnalysis &AA) {
   // Self reads can only happen for instructions that read memory.  Get the
   // location read.
-  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
-  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.
+  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
+  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.
 
   // If the read and written loc obviously don't alias, it isn't a read.
   if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
 
@@ -434,7 +463,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
   // Here we don't know if A/B may alias, but we do know that B/B are must
   // aliases, so removing the first memcpy is safe (assuming it writes <= #
   // bytes as the second one.
-  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
   if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
     return false;
 
@@ -463,7 +492,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
     }
 
     // If we find something that writes memory, get its memory dependence.
-    if (!hasMemoryWrite(Inst))
+    if (!hasMemoryWrite(Inst, *TLI))
       continue;
 
     MemDepResult InstDep = MD->getDependency(Inst);
@@ -486,9 +515,9 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
         // in case we need it.
         WeakVH NextInst(BBI);
 
-        DeleteDeadInstruction(SI, *MD, TLI);
+        DeleteDeadInstruction(SI, *MD, *TLI);
 
-        if (NextInst == 0)  // Next instruction deleted.
+        if (!NextInst)  // Next instruction deleted.
           BBI = BB.begin();
         else if (BBI != BB.begin())  // Revisit this instruction if possible.
           --BBI;
@@ -500,10 +529,10 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
     }
 
     // Figure out what location is being stored to.
-    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+    MemoryLocation Loc = getLocForWrite(Inst, *AA);
 
     // If we didn't get a useful location, fail.
-    if (Loc.Ptr == 0)
+    if (!Loc.Ptr)
       continue;
 
     while (InstDep.isDef() || InstDep.isClobber()) {
@@ -515,25 +544,26 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
       //
      // Find out what memory location the dependent instruction stores.
       Instruction *DepWrite = InstDep.getInst();
-      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
 
       // If we didn't get a useful location, or if it isn't a size, bail out.
-      if (DepLoc.Ptr == 0)
+      if (!DepLoc.Ptr)
         break;
 
       // If we find a write that is a) removable (i.e., non-volatile), b) is
       // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
       if (isRemovable(DepWrite) &&
-          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
+          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
         int64_t InstWriteOffset, DepWriteOffset;
-        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
-                                         DepWriteOffset, InstWriteOffset);
+        const DataLayout &DL = BB.getModule()->getDataLayout();
+        OverwriteResult OR =
+            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
         if (OR == OverwriteComplete) {
           DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                 << *DepWrite << "\n  KILLER: " << *Inst << '\n');
 
           // Delete the store and now-dead instructions that feed it.
-          DeleteDeadInstruction(DepWrite, *MD, TLI);
+          DeleteDeadInstruction(DepWrite, *MD, *TLI);
           ++NumFastStores;
           MadeChange = true;
 
@@ -582,7 +612,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
       if (DepWrite == &BB.front()) break;
 
       // Can't look past this instruction if it might read 'Loc'.
-      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
+      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
         break;
 
       InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
@@ -618,9 +648,10 @@ static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
 bool DSE::HandleFree(CallInst *F) {
   bool MadeChange = false;
 
-  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
   SmallVector<BasicBlock *, 16> Blocks;
   Blocks.push_back(F->getParent());
+  const DataLayout &DL = F->getModule()->getDataLayout();
 
   while (!Blocks.empty()) {
     BasicBlock *BB = Blocks.pop_back_val();
@@ -630,20 +661,20 @@ bool DSE::HandleFree(CallInst *F) {
     MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
     while (Dep.isDef() || Dep.isClobber()) {
       Instruction *Dependency = Dep.getInst();
-      if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
         break;
 
       Value *DepPointer =
-        GetUnderlyingObject(getStoredPointerOperand(Dependency));
+          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
 
       // Check for aliasing.
       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
         break;
 
-      Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+      Instruction *Next = std::next(BasicBlock::iterator(Dependency));
 
       // DCE instructions only used to calculate that store
-      DeleteDeadInstruction(Dependency, *MD, TLI);
+      DeleteDeadInstruction(Dependency, *MD, *TLI);
       ++NumFastStores;
       MadeChange = true;
 
@@ -687,22 +718,24 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       DeadStackObjects.insert(I);
   }
 
-  // Treat byval arguments the same, stores to them are dead at the end of the
-  // function.
+  // Treat byval or inalloca arguments the same, stores to them are dead at the
+  // end of the function.
   for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
        AE = BB.getParent()->arg_end(); AI != AE; ++AI)
-    if (AI->hasByValAttr())
+    if (AI->hasByValOrInAllocaAttr())
       DeadStackObjects.insert(AI);
 
+  const DataLayout &DL = BB.getModule()->getDataLayout();
+
   // Scan the basic block backwards
   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
     --BBI;
 
     // If we find a store, check to see if it points into a dead stack value.
-    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
+    if (hasMemoryWrite(BBI, *TLI) && isRemovable(BBI)) {
       // See through pointer-to-pointer bitcasts
       SmallVector<Value *, 4> Pointers;
-      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
+      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);
 
       // Stores to stack values are valid candidates for removal.
       bool AllDead = true;
@@ -721,13 +754,13 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
               for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                    E = Pointers.end(); I != E; ++I) {
                 dbgs() << **I;
-                if (llvm::next(I) != E)
+                if (std::next(I) != E)
                   dbgs() << ", ";
               }
               dbgs() << '\n');
 
        // DCE instructions only used to calculate that store.
-        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
+        DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
@@ -737,7 +770,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
     // Remove any dead non-memory-mutating instructions.
     if (isInstructionTriviallyDead(BBI, TLI)) {
       Instruction *Inst = BBI++;
-      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
+      DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
       ++NumFastOther;
       MadeChange = true;
       continue;
@@ -750,7 +783,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       continue;
     }
 
-    if (CallSite CS = cast(BBI)) {
+    if (auto CS = CallSite(BBI)) {
       // Remove allocation function calls from the list of dead stack objects;
       // there can't be any references before the definition.
       if (isAllocLikeFn(BBI, TLI))
@@ -763,40 +796,32 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 
       // If the call might load from any of our allocas, then any store above
       // the call is live.
-      SmallVector<Value *, 16> LiveAllocas;
-      for (SmallSetVector<Value *, 16>::iterator I = DeadStackObjects.begin(),
-           E = DeadStackObjects.end(); I != E; ++I) {
-        // See if the call site touches it.
-        AliasAnalysis::ModRefResult A =
-          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
-
-        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
-          LiveAllocas.push_back(*I);
-      }
+      DeadStackObjects.remove_if([&](Value *I) {
+        // See if the call site touches the value.
+        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
+
+        return A == MRI_ModRef || A == MRI_Ref;
+      });
 
       // If all of the allocas were clobbered by the call then we're not going
       // to find anything else to process.
-      if (DeadStackObjects.size() == LiveAllocas.size())
+      if (DeadStackObjects.empty())
         break;
 
-      for (SmallVector<Value *, 16>::iterator I = LiveAllocas.begin(),
-           E = LiveAllocas.end(); I != E; ++I)
-        DeadStackObjects.remove(*I);
-
       continue;
     }
 
-    AliasAnalysis::Location LoadedLoc;
+    MemoryLocation LoadedLoc;
 
     // If we encounter a use of the pointer, it is no longer considered dead
     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
       if (!L->isUnordered()) // Be conservative with atomic/volatile load
         break;
-      LoadedLoc = AA->getLocation(L);
+      LoadedLoc = MemoryLocation::get(L);
     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
-      LoadedLoc = AA->getLocation(V);
+      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
-      LoadedLoc = AA->getLocationForSource(MTI);
+      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
@@ -808,7 +833,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 
     // Remove any allocas from the DeadPointer set that are loaded, as this
     // makes any stores above the access live.
-    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
+    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);
 
     // If all of the allocas were clobbered by the access then we're not going
     // to find anything else to process.
@@ -822,9 +847,10 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 /// RemoveAccessedObjects - Check to see if the specified location may alias any
 /// of the stack objects in the DeadStackObjects set.  If so, they become live
 /// because the location is being loaded.
-void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
-                                SmallSetVector<Value *, 16> &DeadStackObjects) {
-  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
+void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
+                                SmallSetVector<Value *, 16> &DeadStackObjects,
+                                const DataLayout &DL) {
+  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
 
   // A constant can't be in the dead pointer set.
   if (isa<Constant>(UnderlyingPointer))
     return;
@@ -837,16 +863,10 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
     return;
   }
 
-  SmallVector<Value *, 16> NowLive;
-  for (SmallSetVector<Value *, 16>::iterator I = DeadStackObjects.begin(),
-       E = DeadStackObjects.end(); I != E; ++I) {
+  // Remove objects that could alias LoadedLoc.
+  DeadStackObjects.remove_if([&](Value *I) {
     // See if the loaded location could alias the stack location.
-    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
-    if (!AA->isNoAlias(StackLoc, LoadedLoc))
-      NowLive.push_back(*I);
-  }
-
-  for (SmallVector<Value *, 16>::iterator I = NowLive.begin(), E = NowLive.end();
-       I != E; ++I)
-    DeadStackObjects.remove(*I);
+    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
+    return !AA->isNoAlias(StackLoc, LoadedLoc);
+  });
 }
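
Note (illustration, not part of the patch): besides the type renames, the recurring mechanical change above is replacing the "collect the live objects, then erase them afterwards" loops in handleEndBlock and RemoveAccessedObjects with SetVector::remove_if and a lambda, which drops every element matching the predicate in one pass while keeping insertion order. The standalone sketch below shows the same idiom with plain ints instead of IR Values; it assumes only llvm/ADT/SetVector.h from this tree and is meant as a minimal demonstration of the API, not as code taken from DeadStoreElimination.cpp.

  #include "llvm/ADT/SetVector.h"
  #include <cstdio>

  int main() {
    // Stand-in for the DeadStackObjects set kept by DSE::handleEndBlock.
    llvm::SetVector<int> DeadStackObjects;
    for (int V : {1, 2, 3, 4, 5, 6})
      DeadStackObjects.insert(V);

    // Remove every element the predicate marks as "live" (here: the even
    // ones), mirroring how the patched code drops stack objects that a call
    // or load might touch, without building a separate LiveAllocas vector.
    DeadStackObjects.remove_if([](int V) { return V % 2 == 0; });

    for (int V : DeadStackObjects)
      std::printf("%d ", V); // prints: 1 3 5
    std::printf("\n");
    return 0;
  }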