//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h" // for CallSite, used by several helpers below
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
namespace {
struct DSE : public FunctionPass {
  AliasAnalysis *AA;
  MemoryDependenceAnalysis *MD;
  DominatorTree *DT;
  const TargetLibraryInfo *TLI;

  static char ID; // Pass identification, replacement for typeid
  DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
    initializeDSEPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    AA = &getAnalysis<AliasAnalysis>();
    MD = &getAnalysis<MemoryDependenceAnalysis>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    bool Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
      // Only check non-dead blocks.  Dead blocks may have strange pointer
      // cycles that will confuse alias analysis.
      if (DT->isReachableFromEntry(I))
        Changed |= runOnBasicBlock(*I);

    AA = nullptr; MD = nullptr; DT = nullptr;
    return Changed;
  }

  bool runOnBasicBlock(BasicBlock &BB);
  bool HandleFree(CallInst *F);
  bool handleEndBlock(BasicBlock &BB);
  void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                             SmallSetVector<Value *, 16> &DeadStackObjects,
                             const DataLayout &DL);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<MemoryDependenceAnalysis>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<AliasAnalysis>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<MemoryDependenceAnalysis>();
  }
};
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
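
// Illustrative usage (not part of the original file; 'M' is a placeholder
// Module): with the legacy pass manager this pass is typically scheduled as
//   legacy::PassManager PM;
//   PM.add(createDeadStoreEliminationPass());
//   PM.run(M);
// or from the command line via 'opt -dse input.ll'.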
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void DeleteDeadInstruction(Instruction *I,
                               MemoryDependenceAnalysis &MD,
                               const TargetLibraryInfo &TLI,
                               SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}
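
// Illustrative IR (a sketch, not from this file): if the store below is dead,
// DeleteDeadInstruction erases it and then the getelementptr feeding it,
// because the GEP becomes trivially dead once its only user is gone:
//
//   %p = getelementptr inbounds i32, i32* %buf, i64 1
//   store i32 0, i32* %p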
/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI.has(LibFunc::strcpy) &&
          F->getName() == TLI.getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strncpy) &&
          F->getName() == TLI.getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strcat) &&
          F->getName() == TLI.getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI.has(LibFunc::strncat) &&
          F->getName() == TLI.getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}
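
// Illustrative IR (a sketch; the names are placeholders): each of the
// following is recognized as a memory write by hasMemoryWrite, while a plain
// load or arithmetic instruction is not:
//
//   store i32 1, i32* %p
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i32 1, i1 false)
//   call i8* @strcpy(i8* %dst, i8* %src)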
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}
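
// For example (illustrative), for
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i32 1, i1 false)
// getLocForRead returns the source location (%src, 16); for a plain store it
// returns an empty MemoryLocation.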
/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}
/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
    }
  }

  // Don't shorten libcalls for now.
  return false;
}
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}
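
// Illustrative: for "%a = alloca [16 x i8]" this returns 16, while for a
// pointer of unknown provenance it returns MemoryLocation::UnknownSize.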
namespace {
enum OverwriteResult {
  OverwriteComplete,
  OverwriteEnd,
  OverwriteUnknown
};
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location,
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == MemoryLocation::UnknownSize ||
        Earlier.Size == MemoryLocation::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}
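
// Worked example (illustrative): if the earlier store writes 4 bytes at
// base+0 and the later store writes 8 bytes at base+2, then EarlierOff = 0
// and LaterOff = 2; since 2 < 0 + 4 and 2 + 8 >= 0 + 4, the result is
// OverwriteEnd and the earlier store can be trimmed to its first 2 bytes.
// If the later store instead covered base+0..base+8, (0 - 0) + 4 <= 8 would
// give OverwriteComplete.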
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A
/// dead.  The usual situation isn't an explicit A<-A store like this (which
/// can be trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  //
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return true;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return false;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save
          // BBI in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, *TLI);

          if (!NextInst) // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin()) // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }
    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out.  However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;
      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) not loaded from
      // by 'Inst', then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        const DataLayout &DL = BB.getModule()->getDataLayout();
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, *TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it below the vector size is likely to
          // be slower.
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            // Trim the earlier write so it stops just before the bytes the
            // later write overwrites.
            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
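
// Illustrative: within a single block,
//   store i32 1, i32* %p   ; dead, removed by runOnBasicBlock
//   store i32 2, i32* %p
// the first store is deleted because the second completely overwrites it and
// nothing reads %p in between.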
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, *TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}
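
// Illustrative: in
//   store i32 0, i32* %field   ; dead, removed by HandleFree
//   call void @free(i8* %obj)
// the store is deleted once the stored-to object is known to must-alias the
// pointer being freed, since nothing can observe %field after the free.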
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);
  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, *TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }
    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }
    if (auto CS = CallSite(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }
    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
/// RemoveAccessedObjects - Check to see if the specified location may alias
/// any of the stack objects in the DeadStackObjects set.  If so, they become
/// live because the location is being loaded.
void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}