//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
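//
// For example (illustrative IR), the first store below is dead because the
// second store overwrites it before any intervening load:
//
//   store i32 0, i32* %p
//   store i32 1, i32* %p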
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumCrossBlockStores, "Number of cross block stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");

namespace {
struct DSE : public FunctionPass {
  AliasAnalysis *AA;
  MemoryDependenceAnalysis *MD;
  DominatorTree *DT;
  PostDominatorTree *PDT;
  const TargetLibraryInfo *TLI;
  SmallVector<SmallVector<StoreInst *, 8>, 16> Candidates;
  SetVector<StoreInst *> DeadStores;
  SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 32>
      BackEdges;
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> BackEdgesMap;

  static char ID; // Pass identification, replacement for typeid
  DSE()
      : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr),
        PDT(nullptr), TLI(nullptr) {
    initializeDSEPass(*PassRegistry::getPassRegistry());
  }

  // Return all stores in a given BasicBlock.
  SmallVector<StoreInst *, 8> getStores(BasicBlock *BB) {
    SmallVector<StoreInst *, 8> VecStores;
    for (auto &BI : *BB) {
      if (StoreInst *SI = dyn_cast<StoreInst>(&BI))
        VecStores.push_back(SI);
    }
    return VecStores;
  }

  // Compute DFS in/out numbers on the PDT and populate the Candidates store
  // list, which is used to find potential dead stores for a given block.
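  //
  // In a DFS numbering of the post-dominator tree, every block post-dominated
  // by a block B has its DFS-in number inside the open interval
  // (B.DFSIn, B.DFSOut), so indexing Candidates by DFS-in number turns the
  // later "which blocks does B post-dominate" query into a simple range walk.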
  void populateCandidateStores(Function &F) {
    for (auto &I : F) {
      DomTreeNode *DTNode = PDT->getNode(&I);
      if (!DTNode)
        continue;
      int DFSIn = DTNode->getDFSNumIn();
      SmallVector<StoreInst *, 8> VecStores = getStores(&I);
      Candidates[DFSIn] = VecStores;
    }
  }

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    AA = &getAnalysis<AliasAnalysis>();
    MD = &getAnalysis<MemoryDependenceAnalysis>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    PDT = &getAnalysis<PostDominatorTree>();
    if (PDT->getRootNode()) {
      int Count = PDT->getRootNode()->getDFSNumOut();
      SmallVector<StoreInst *, 8> VecStores;
      Candidates.resize(Count + 1);
      Candidates.assign(Count + 1, VecStores);

      // If we have more than one block, try to populate the candidate stores.
      if (F.size() != 1) {
        populateCandidateStores(F);
        FindFunctionBackedges(F, BackEdges);
        for (auto I : BackEdges)
          BackEdgesMap.insert(I);
      }
    }
    bool Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
      // Only check non-dead blocks.  Dead blocks may have strange pointer
      // cycles that will confuse alias analysis.
      if (DT->isReachableFromEntry(I))
        Changed |= runOnBasicBlock(*I);

    AA = nullptr; MD = nullptr; DT = nullptr;
    return Changed;
  }

  bool runOnBasicBlock(BasicBlock &BB);
  bool MemoryIsNotModifiedBetween(LoadInst *LI, StoreInst *SI);
  bool HandleFree(CallInst *F);
  bool handleEndBlock(BasicBlock &BB);
  void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                             SmallSetVector<Value *, 16> &DeadStackObjects,
                             const DataLayout &DL);
  void handleNonLocalStoreDeletion(StoreInst *SI);
  bool isSafeCandidateForDeletion(BasicBlock *SrcBlock, BasicBlock *SinkBlock,
                                  StoreInst *SI);
  void DeleteDeadInstruction(Instruction *I, MemoryDependenceAnalysis &MD,
                             const TargetLibraryInfo &TLI,
                             SmallSetVector<Value *, 16> *ValueSet = nullptr);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<MemoryDependenceAnalysis>();
    AU.addRequired<PostDominatorTree>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<AliasAnalysis>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<MemoryDependenceAnalysis>();
    AU.addPreserved<PostDominatorTree>();
  }
};
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// hasMemoryWrite - Does this instruction write some memory?  This only returns
/// true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI.has(LibFunc::strcpy) &&
          F->getName() == TLI.getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strncpy) &&
          F->getName() == TLI.getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strcat) &&
          F->getName() == TLI.getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI.has(LibFunc::strncat) &&
          F->getName() == TLI.getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
    }
  }

  // Don't shorten libcalls for now.
  return false;
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {
enum OverwriteResult {
  OverwriteComplete,
  OverwriteEnd,
  OverwriteUnknown
};
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location,
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == MemoryLocation::UnknownSize ||
        Earlier.Size == MemoryLocation::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

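// For example (illustrative IR), the pair below yields OverwriteComplete:
// both pointers strip to the same base %p, the earlier store covers bytes
// [0, 4) and the later store covers bytes [0, 8):
//
//   %q = bitcast i32* %p to i64*
//   store i32 0, i32* %p   ; Earlier
//   store i64 -1, i64* %q  ; Later: completely overwrites the earlier store
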
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  //
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return true;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return false;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
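    //
    // For example (illustrative IR):
    //
    //   %x = load i32, i32* %p
    //   store i32 %x, i32* %p   ; removable if %p is not written in between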
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            isRemovable(SI) &&
            MemoryIsNotModifiedBetween(DepLoad, SI)) {

          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, *TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumRedundantStores;
          MadeChange = true;
          continue;
        }
      }
    }

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore stores for which no dependence information is available.
    if (!InstDep.isDef() && !InstDep.isClobber() && !InstDep.isNonLocal())
      continue;
    // A non-local dependence means the dependency lies outside this block;
    // try cross-block dead store elimination using the post-dominator tree.
    if (InstDep.isNonLocal()) {
      if (!PDT->getRootNode())
        continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        handleNonLocalStoreDeletion(SI);
      continue;
    }
    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        const DataLayout &DL = BB.getModule()->getDataLayout();
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, *TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it to a non-vector size is likely to
          // be slower.
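          //
          // For example (illustrative, assuming suitable alignment), given:
          //
          //   memset(p, 0, 32)
          //   store to bytes [24, 32) of p
          //
          // the memset is not dead, but its length can be trimmed from 32 to
          // 24, since its last 8 bytes are guaranteed to be overwritten.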
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                         << *DepWrite << "\n  KILLER (offset "
                         << InstWriteOffset << ", "
                         << DepLoc.Size << ")"
                         << *Inst << '\n');

            // Trim the length so the intrinsic no longer writes bytes that
            // the later store will overwrite.
            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                        DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //
      //   store -> P
      //   store -> Q
      //   store -> P
      //
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Returns true if the memory which is accessed by the store instruction is not
/// modified between the load and the store instruction.
/// Precondition: The store instruction must be dominated by the load
/// instruction.
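///
/// For example (illustrative IR), the backward walk below must prove that @f
/// cannot write to %p before the store can be treated as a redundant
/// store-of-load:
///
///   %x = load i32, i32* %p
///   call void @f()
///   store i32 %x, i32* %p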
bool DSE::MemoryIsNotModifiedBetween(LoadInst *LI, StoreInst *SI) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator LoadBBI(LI);
  ++LoadBBI;
  BasicBlock::iterator StoreBBI(SI);
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *StoreBB = SI->getParent();
  MemoryLocation StoreLoc = MemoryLocation::get(SI);

  // Start checking the store-block.
  WorkList.push_back(StoreBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the LoadBB.
    BasicBlock::iterator BI = (B == LoadBB ? LoadBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of StoreBB.
      assert(B == StoreBB && "first block is not the store block");
      EI = StoreBBI;
      isFirstBlock = false;
    } else {
      // It's not StoreBB or (in case of a loop) the second visit of StoreBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = BI;
      if (I->mayWriteToMemory() && I != SI) {
        auto Res = AA->getModRefInfo(I, StoreLoc);
        if (Res != MRI_NoModRef)
          return false;
      }
    }
    if (B != LoadBB) {
      assert(B != &LoadBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to the block worklist.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
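///
/// For example, deleting "store i32 %add, i32* %p" may leave
/// "%add = add i32 %a, %b" without any users, in which case %add is deleted
/// too (and so on, transitively, up the feeding computation tree).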
void DSE::DeleteDeadInstruction(Instruction *I, MemoryDependenceAnalysis &MD,
                                const TargetLibraryInfo &TLI,
                                SmallSetVector<Value *, 16> *ValueSet) {
  SmallVector<Instruction *, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;
    if (StoreInst *SI = dyn_cast<StoreInst>(DeadInst))
      DeadStores.insert(SI);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet)
      ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
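///
/// For example (illustrative C-like pseudocode), the store below is dead
/// because the entire object is freed immediately afterwards:
///
///   s->field = 1;
///   free(s);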
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, *TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0;  // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
///
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // block.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, *TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (auto CS = CallSite(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// isSafeCandidateForDeletion - Check all paths from the SrcBlock till
/// SinkBlock to see if store 'SI' is safe to remove.
/// Returns true if the candidate store SI is safe to delete,
/// else returns false.
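///
/// Note that any path containing a backedge is conservatively rejected: in
/// the presence of a cycle, this single-visit DFS cannot guarantee that the
/// stored location is overwritten before it is read on every dynamic path.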
bool DSE::isSafeCandidateForDeletion(BasicBlock *SrcBlock,
                                     BasicBlock *SinkBlock, StoreInst *SI) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator BBI(SI);

  // Check from the store till end of block and make sure we have no references
  // to memory stored by this Store Instruction.
  for (auto BI = ++BBI, BE = SrcBlock->end(); BI != BE; ++BI) {
    Instruction *I = BI;
    if (StoreInst *CSI = dyn_cast<StoreInst>(I)) {
      AliasResult R =
          AA->alias(MemoryLocation::get(SI), MemoryLocation::get(CSI));
      if (R == MustAlias)
        return true;
    } else {
      ModRefInfo Res = AA->getModRefInfo(I, MemoryLocation::get(SI));
      if (Res != MRI_NoModRef)
        return false;
    }
  }

  // Add successors of the block to the worklist and start the DFS.
  for (succ_iterator I = succ_begin(SrcBlock), E = succ_end(SrcBlock); I != E;
       ++I) {
    if (!Visited.insert(*I).second)
      continue;
    // A path with a backedge may not be safe.  Conservatively mark
    // this store unsafe.
    if (BackEdgesMap.count(std::make_pair(SrcBlock, *I)))
      return false;
    WorkList.push_back(*I);
  }

  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();
    auto BI = B->begin();
    auto BE = B->end();
    for (; BI != BE; ++BI) {
      Instruction *I = BI;
      if (StoreInst *CSI = dyn_cast<StoreInst>(I)) {
        AliasResult R =
            AA->alias(MemoryLocation::get(SI), MemoryLocation::get(CSI));
        if (R == MustAlias)
          break;
      } else {
        ModRefInfo Res = AA->getModRefInfo(I, MemoryLocation::get(SI));
        if (Res != MRI_NoModRef)
          return false;
      }
    }

    // If we reached the sink node, or we found a block with a store that
    // overwrites the candidate store, we need not look at its successors.
    if (B == SinkBlock || BI != BE)
      continue;

    for (succ_iterator I = succ_begin(B), E = succ_end(B); I != E; ++I) {
      if (!Visited.insert(*I).second)
        continue;
      // A path with a backedge may not be safe.  Conservatively mark
      // this store unsafe.
      if (BackEdgesMap.count(std::make_pair(B, *I)))
        return false;
      WorkList.push_back(*I);
    }
  }
  return true;
}

/// handleNonLocalStoreDeletion - Handle non-local dead store elimination.
/// This works by finding candidate stores using the PDT and then running a DFS
/// from each candidate store's block, checking all paths to make sure the
/// store is safe to delete.
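///
/// For example (illustrative IR), with bb2 post-dominating bb1, the store in
/// bb1 is dead because every path from bb1 reaches the overwriting store in
/// bb2 without an intervening read of %p:
///
///   bb1:
///     store i32 0, i32* %p
///     br label %bb2
///   bb2:
///     store i32 1, i32* %p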
void DSE::handleNonLocalStoreDeletion(StoreInst *SI) {
  BasicBlock *BB = SI->getParent();
  Value *Pointer = SI->getPointerOperand();
  DomTreeNode *DTNode = PDT->getNode(BB);
  if (!DTNode)
    return;

  int DFSNumIn = DTNode->getDFSNumIn();
  int DFSNumOut = DTNode->getDFSNumOut();
  for (int i = DFSNumIn + 1; i < DFSNumOut; ++i) {
    for (auto &I : Candidates[i]) {
      StoreInst *CandidateSI = I;
      if (DeadStores.count(CandidateSI))
        continue;
      Value *MemPtr = CandidateSI->getPointerOperand();
      if (!MemPtr)
        continue;
      if (Pointer->getType() != MemPtr->getType())
        continue;
      AliasResult R =
          AA->alias(MemoryLocation::get(SI), MemoryLocation::get(CandidateSI));
      if (R != MustAlias)
        continue;
      if (isSafeCandidateForDeletion(CandidateSI->getParent(), BB,
                                     CandidateSI)) {
        DeleteDeadInstruction(CandidateSI, *MD, *TLI);
        ++NumCrossBlockStores;
      }
    }
  }
}