//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther,  "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = AA->getTargetLibraryInfo();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = nullptr; MD = nullptr; DT = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallSetVector<Value *, 16> &DeadStackObjects,
                               const DataLayout &DL);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
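
// Illustrative use from a legacy pass-manager client (a sketch; only the
// creation function above is taken from this file, the surrounding setup is
// assumed):
//
//   legacy::PassManager PM;
//   PM.add(createDeadStoreEliminationPass());
//   PM.run(M);                       // M is an llvm::Module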

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  const TargetLibraryInfo *TLI,
                                  SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
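  // Note: the decrement below offsets the first ++NumFastOther in the loop,
  // which would otherwise count I itself; only the extra instructions that
  // become dead are recorded in the statistic.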
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}

/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (CallSite CS = I) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI && TLI->has(LibFunc::strcpy) &&
          F->getName() == TLI->getName(LibFunc::strcpy))
        return true;
      if (TLI && TLI->has(LibFunc::strncpy) &&
          F->getName() == TLI->getName(LibFunc::strncpy))
        return true;
      if (TLI && TLI->has(LibFunc::strcat) &&
          F->getName() == TLI->getName(LibFunc::strcat))
        return true;
      if (TLI && TLI->has(LibFunc::strncat) &&
          F->getName() == TLI->getName(LibFunc::strncat))
        return true;
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
         "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}

/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
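  // (isUnordered() is false for volatile stores and for atomic stores
  // stronger than "unordered", so those are conservatively kept.)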
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (CallSite CS = I)
    return CS.getInstruction()->use_empty();

  return false;
}

/// isShortenable - Returns true if this instruction can be safely shortened
/// in length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        return true;
    }
  }

  // Don't shorten libcall calls for now.
  return false;
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS = I;
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo *TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, TLI))
    return Size;
  return AliasAnalysis::UnknownSize;
}

namespace {
  enum OverwriteResult { OverwriteComplete, OverwriteEnd, OverwriteUnknown };
}
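
// Illustrative examples (hypothetical IR, not from this file): if the earlier
// write is a 32-byte memset of %p, a second 32-byte memset of %p is
// OverwriteComplete; an 8-byte store at offset 24 from %p (covering the last
// bytes of the memset) is OverwriteEnd; and an 8-byte store at offset 0 is
// OverwriteUnknown, since it covers only part of the earlier write.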

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
/// the end of the 'Earlier' location is completely overwritten by 'Later', or
/// 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo *TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    which still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A
/// dead.  The usual situation isn't an explicit A<-A store like this (which
/// can be trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  //
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, TLI))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
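    // For example (illustrative IR, %p is hypothetical):
    //   %x = load i32, i32* %p
    //   store i32 %x, i32* %p   ; removable: stores the just-loaded value back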
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save
          // BBI in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out.  However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if we can't determine its size,
      // bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know
      // that 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        const DataLayout &DL = BB.getModule()->getDataLayout();
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, AA->getTargetLibraryInfo(),
                        DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway, then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it to a non-vector size is likely to
          // be slower.
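          // The trim below keeps only the prefix of the earlier write that
          // the later write leaves visible; e.g. (illustrative) a 32-byte
          // memset followed by a store covering its last 8 bytes is shortened
          // to a 24-byte memset.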
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to the set Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
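/// For example (illustrative):
///   store i32 0, i32* %field   ; %field points into %s
///   call void @free(i8* %s)    ; the store is dead: %s is never read again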
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0;  // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at
  // the end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        AliasAnalysis::ModRefResult A = AA->getModRefInfo(
            CS, I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));

        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias
/// any of the stack objects in the DeadStackObjects set.  If so, they become
/// live because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(
        I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}