X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FDeadStoreElimination.cpp;h=03a557e0600a184ed9bcf3bd02501ea2fa62ba77;hb=5ccb0825ed1bdf6271ef451b8239e86d4ff635b1;hp=ffa77098bc8eff552ee06e89ab54e9f47575dd5f;hpb=1582e7f1e255c19595f82cb447e52869196dec58;p=oota-llvm.git

diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index ffa77098bc8..03a557e0600 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -24,6 +24,7 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Pass.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
@@ -33,6 +34,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
 using namespace llvm;
 
 STATISTIC(NumFastStores, "Number of stores deleted");
@@ -42,25 +44,26 @@ namespace {
   struct DSE : public FunctionPass {
     AliasAnalysis *AA;
     MemoryDependenceAnalysis *MD;
+    DominatorTree *DT;
 
     static char ID; // Pass identification, replacement for typeid
-    DSE() : FunctionPass(ID), AA(0), MD(0) {
+    DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
       initializeDSEPass(*PassRegistry::getPassRegistry());
     }
 
     virtual bool runOnFunction(Function &F) {
       AA = &getAnalysis<AliasAnalysis>();
       MD = &getAnalysis<MemoryDependenceAnalysis>();
-      DominatorTree &DT = getAnalysis<DominatorTree>();
+      DT = &getAnalysis<DominatorTree>();
 
       bool Changed = false;
       for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
         // Only check non-dead blocks. Dead blocks may have strange pointer
         // cycles that will confuse alias analysis.
-        if (DT.isReachableFromEntry(I))
+        if (DT->isReachableFromEntry(I))
           Changed |= runOnBasicBlock(*I);
 
-      AA = 0; MD = 0;
+      AA = 0; MD = 0; DT = 0;
       return Changed;
     }
 
@@ -238,6 +241,24 @@ static bool isRemovable(Instruction *I) {
   }
 }
 
+
+/// isShortenable - Returns true if this instruction can be safely shortened in
+/// length.
+static bool isShortenable(Instruction *I) {
+  // Don't shorten stores for now
+  if (isa<StoreInst>(I))
+    return false;
+
+  IntrinsicInst *II = cast<IntrinsicInst>(I);
+  switch (II->getIntrinsicID()) {
+    default: return false;
+    case Intrinsic::memset:
+    case Intrinsic::memcpy:
+      // Do shorten memory intrinsics.
+      return true;
+  }
+}
+
 /// getStoredPointerOperand - Return the pointer that is being written to.
 static Value *getStoredPointerOperand(Instruction *I) {
   if (StoreInst *SI = dyn_cast<StoreInst>(I))
@@ -255,6 +276,14 @@ static Value *getStoredPointerOperand(Instruction *I) {
 
 static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
   const TargetData *TD = AA.getTargetData();
+
+  if (CallInst *CI = dyn_cast<CallInst>(V)) {
+    assert(isMalloc(CI) && "Expected Malloc call!");
+    if (ConstantInt *C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
+      return C->getZExtValue();
+    return AliasAnalysis::UnknownSize;
+  }
+
   if (TD == 0)
     return AliasAnalysis::UnknownSize;
 
@@ -265,7 +294,7 @@ static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
     return AliasAnalysis::UnknownSize;
   }
 
-  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
+  assert(isa<Argument>(V) && "Expected AllocaInst, malloc call or Argument!");
   PointerType *PT = cast<PointerType>(V->getType());
   return TD->getTypeAllocSize(PT->getElementType());
 }
@@ -279,14 +308,29 @@ static bool isObjectPointerWithTrustworthySize(const Value *V) {
     return !GV->mayBeOverridden();
   if (const Argument *A = dyn_cast<Argument>(V))
     return A->hasByValAttr();
+  if (isMalloc(V))
+    return true;
   return false;
 }
 
-/// isCompleteOverwrite - Return true if a store to the 'Later' location
+namespace {
+  enum OverwriteResult
+  {
+    OverwriteComplete,
+    OverwriteEnd,
+    OverwriteUnknown
+  };
+}
+
+/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
 /// completely overwrites a store to the 'Earlier' location.
-static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
-                                const AliasAnalysis::Location &Earlier,
-                                AliasAnalysis &AA) {
+/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
+/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
+static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
+                                   const AliasAnalysis::Location &Earlier,
+                                   AliasAnalysis &AA,
+                                   int64_t& EarlierOff,
+                                   int64_t& LaterOff) {
   const Value *P1 = Earlier.Ptr->stripPointerCasts();
   const Value *P2 = Later.Ptr->stripPointerCasts();
 
@@ -300,23 +344,24 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
       // If we have no TargetData information around, then the size of the store
       // is inferrable from the pointee type. If they are the same type, then
      // we know that the store is safe.
-      if (AA.getTargetData() == 0)
-        return Later.Ptr->getType() == Earlier.Ptr->getType();
-      return false;
+      if (AA.getTargetData() == 0 &&
+          Later.Ptr->getType() == Earlier.Ptr->getType())
+        return OverwriteComplete;
+
+      return OverwriteUnknown;
     }
 
     // Make sure that the Later size is >= the Earlier size.
-    if (Later.Size < Earlier.Size)
-      return false;
-    return true;
+    if (Later.Size >= Earlier.Size)
+      return OverwriteComplete;
   }
 
   // Otherwise, we have to have size information, and the later store has to be
   // larger than the earlier one.
   if (Later.Size == AliasAnalysis::UnknownSize ||
       Earlier.Size == AliasAnalysis::UnknownSize ||
-      Later.Size <= Earlier.Size || AA.getTargetData() == 0)
-    return false;
+      AA.getTargetData() == 0)
+    return OverwriteUnknown;
 
   // Check to see if the later store is to the entire object (either a global,
   // an alloca, or a byval argument). If so, then it clearly overwrites any
@@ -329,26 +374,27 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
   // If we can't resolve the same pointers to the same object, then we can't
   // analyze them at all.
   if (UO1 != UO2)
-    return false;
+    return OverwriteUnknown;
 
   // If the "Later" store is to a recognizable object, get its size.
   if (isObjectPointerWithTrustworthySize(UO2)) {
     uint64_t ObjectSize =
       TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
     if (ObjectSize == Later.Size)
-      return true;
+      return OverwriteComplete;
   }
 
   // Okay, we have stores to two completely different pointers. Try to
   // decompose the pointer into a "base + constant_offset" form. If the base
   // pointers are equal, then we can reason about the two stores.
-  int64_t EarlierOff = 0, LaterOff = 0;
+  EarlierOff = 0;
+  LaterOff = 0;
   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
 
   // If the base pointers still differ, we have two completely different stores.
   if (BP1 != BP2)
-    return false;
+    return OverwriteUnknown;
 
   // The later store completely overlaps the earlier store if:
   //
@@ -366,11 +412,25 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
   //
   // We have to be careful here as *Off is signed while *.Size is unsigned.
   if (EarlierOff >= LaterOff &&
+      Later.Size > Earlier.Size &&
       uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
-    return true;
+    return OverwriteComplete;
+
+  // The other interesting case is if the later store overwrites the end of
+  // the earlier store
+  //
+  //      |--earlier--|
+  //               |-- later --|
+  //
+  // In this case we may want to trim the size of earlier to avoid generating
+  // writes to addresses which will definitely be overwritten later
+  if (LaterOff > EarlierOff &&
+      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
+      LaterOff + Later.Size >= EarlierOff + Earlier.Size)
+    return OverwriteEnd;
 
   // Otherwise, they don't completely overlap.
-  return false;
+  return OverwriteUnknown;
 }
 
 /// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
@@ -441,7 +501,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
 
     // Ignore any store where we can't find a local dependence.
     // FIXME: cross-block DSE would be fun. :)
-    if (InstDep.isNonLocal() || InstDep.isUnknown())
+    if (!InstDep.isDef() && !InstDep.isClobber())
       continue;
 
     // If we're storing the same value back to a pointer that we just
@@ -477,7 +537,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
     if (Loc.Ptr == 0)
       continue;
 
-    while (!InstDep.isNonLocal() && !InstDep.isUnknown()) {
+    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on. MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with. If we
      // end up depending on a may- or must-aliased load, then we can't optimize
@@ -494,22 +554,52 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
       // If we find a write that is a) removable (i.e., non-volatile), b) is
       // completely obliterated by the store to 'Loc', and c) which we know that
       // 'Inst' doesn't load from, then we can remove it.
-      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA) &&
+      if (isRemovable(DepWrite) &&
           !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
-        DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
-              << *DepWrite << "\n KILLER: " << *Inst << '\n');
-
-        // Delete the store and now-dead instructions that feed it.
-        DeleteDeadInstruction(DepWrite, *MD);
-        ++NumFastStores;
-        MadeChange = true;
-
-        // DeleteDeadInstruction can delete the current instruction in loop
-        // cases, reset BBI.
-        BBI = Inst;
-        if (BBI != BB.begin())
-          --BBI;
-        break;
+        int64_t InstWriteOffset, DepWriteOffset;
+        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
+                                         DepWriteOffset, InstWriteOffset);
+        if (OR == OverwriteComplete) {
+          DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
+                << *DepWrite << "\n KILLER: " << *Inst << '\n');
+
+          // Delete the store and now-dead instructions that feed it.
+          DeleteDeadInstruction(DepWrite, *MD);
+          ++NumFastStores;
+          MadeChange = true;
+
+          // DeleteDeadInstruction can delete the current instruction in loop
+          // cases, reset BBI.
+          BBI = Inst;
+          if (BBI != BB.begin())
+            --BBI;
+          break;
+        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
+          // TODO: base this on the target vector size so that if the earlier
+          // store was too small to get vector writes anyway then its likely
+          // a good idea to shorten it
+          // Power of 2 vector writes are probably always a bad idea to optimize
+          // as any store/memset/memcpy is likely using vector instructions so
+          // shortening it to not vector size is likely to be slower
+          MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
+          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
+          if (llvm::isPowerOf2_64(InstWriteOffset) ||
+              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
+
+            DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW END: "
+                  << *DepWrite << "\n KILLER (offset "
+                  << InstWriteOffset << ", "
+                  << DepLoc.Size << ")"
+                  << *Inst << '\n');
+
+            Value* DepWriteLength = DepIntrinsic->getLength();
+            Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
+                                                    InstWriteOffset -
+                                                    DepWriteOffset);
+            DepIntrinsic->setLength(TrimmedLength);
+            MadeChange = true;
+          }
+        }
       }
 
       // If this is a may-aliased store that is clobbering the store value, we
@@ -538,37 +628,66 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
   return MadeChange;
 }
 
+/// Find all blocks that will unconditionally lead to the block BB and append
+/// them to F.
+static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
+                                   BasicBlock *BB, DominatorTree *DT) {
+  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+    BasicBlock *Pred = *I;
+    TerminatorInst *PredTI = Pred->getTerminator();
+    if (PredTI->getNumSuccessors() != 1)
+      continue;
+
+    if (DT->isReachableFromEntry(Pred))
+      Blocks.push_back(Pred);
+  }
+}
+
 /// HandleFree - Handle frees of entire structures whose dependency is a store
 /// to a field of that structure.
 bool DSE::HandleFree(CallInst *F) {
   bool MadeChange = false;
 
-  MemDepResult Dep = MD->getDependency(F);
+  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+  SmallVector<BasicBlock *, 16> Blocks;
+  Blocks.push_back(F->getParent());
 
-  while (!Dep.isNonLocal() && !Dep.isUnknown()) {
-    Instruction *Dependency = Dep.getInst();
-    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
-      return MadeChange;
+  while (!Blocks.empty()) {
+    BasicBlock *BB = Blocks.pop_back_val();
+    Instruction *InstPt = BB->getTerminator();
+    if (BB == F->getParent()) InstPt = F;
 
-    Value *DepPointer =
-      GetUnderlyingObject(getStoredPointerOperand(Dependency));
+    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
+    while (Dep.isDef() || Dep.isClobber()) {
+      Instruction *Dependency = Dep.getInst();
+      if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+        break;
 
-    // Check for aliasing.
-    if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
-      return MadeChange;
+      Value *DepPointer =
+        GetUnderlyingObject(getStoredPointerOperand(Dependency));
 
-    // DCE instructions only used to calculate that store
-    DeleteDeadInstruction(Dependency, *MD);
-    ++NumFastStores;
-    MadeChange = true;
+      // Check for aliasing.
+      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
+        break;
 
-    // Inst's old Dependency is now deleted. Compute the next dependency,
-    // which may also be dead, as in
-    //    s[0] = 0;
-    //    s[1] = 0; // This has just been deleted.
-    //    free(s);
-    Dep = MD->getDependency(F);
-  };
+      Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+
+      // DCE instructions only used to calculate that store
+      DeleteDeadInstruction(Dependency, *MD);
+      ++NumFastStores;
+      MadeChange = true;
+
+      // Inst's old Dependency is now deleted. Compute the next dependency,
+      // which may also be dead, as in
+      //    s[0] = 0;
+      //    s[1] = 0; // This has just been deleted.
+      //    free(s);
+      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
+    }
+
+    if (Dep.isNonLocal())
+      FindUnconditionalPreds(Blocks, BB, DT);
+  }
 
   return MadeChange;
 }
 
@@ -588,10 +707,17 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 
   // Find all of the alloca'd pointers in the entry block.
   BasicBlock *Entry = BB.getParent()->begin();
-  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
+  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);
 
+    // Okay, so these are dead heap objects, but if the pointer never escapes
+    // then it's leaked by this function anyways.
+    if (CallInst *CI = extractMallocCall(I))
+      if (!PointerMayBeCaptured(CI, true, true))
+        DeadStackObjects.insert(CI);
+  }
+
   // Treat byval arguments the same, stores to them are dead at the end of the
   // function.
   for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
@@ -637,6 +763,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       continue;
     }
 
+    if (CallInst *CI = extractMallocCall(BBI)) {
+      DeadStackObjects.erase(CI);
+      continue;
+    }
+
    if (CallSite CS = cast<Value>(BBI)) {
       // If this call does not access memory, it can't be loading any of our
       // pointers.
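
For context on what the new OverwriteEnd/isShortenable path is meant to catch, here is a minimal C++ sketch. It is not part of the patch: the function name and sizes are illustrative assumptions, and it presumes the library calls are lowered to the memset/memcpy intrinsics this pass inspects.

#include <cstdint>
#include <cstring>

// Hypothetical example: the memset writes dst[0..32) and the memcpy then
// overwrites dst[16..32). Under the patch's logic, isOverwrite() should
// report OverwriteEnd for the memset (DepWriteOffset = 0, InstWriteOffset
// = 16), and since 16 is a power of two, DSE may trim the memset's length
// from 32 down to 16 instead of writing the soon-to-be-overwritten tail.
void tail_overwrite(uint8_t *dst, const uint8_t *src) {
  std::memset(dst, 0, 32);
  std::memcpy(dst + 16, src, 16);
}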