//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
18 #include "llvm/Transforms/Scalar.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = nullptr; MD = nullptr; DT = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                               SmallSetVector<Value *, 16> &DeadStackObjects,
                               const DataLayout &DL);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  const TargetLibraryInfo &TLI,
                                  SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}
/// hasMemoryWrite - Does this instruction write some memory?  This only returns
/// true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI.has(LibFunc::strcpy) &&
          F->getName() == TLI.getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strncpy) &&
          F->getName() == TLI.getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strcat) &&
          F->getName() == TLI.getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI.has(LibFunc::strncat) &&
          F->getName() == TLI.getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}
/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}
/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        return true;
    }
  }

  // Don't shorten libcalls for now.
  return false;
}
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {
enum OverwriteResult {
  OverwriteComplete,
  OverwriteEnd,
  OverwriteUnknown
};
}
/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
/// the end of the 'Earlier' location is completely overwritten by 'Later', or
/// 'OverwriteUnknown' if nothing can be determined.
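///
/// For illustration (example added here, not from the original source), with
/// two stores at constant offsets from the same base pointer:
///   store i32 0, i32* %p     ; Earlier covers bytes [0, 4)
///   store i64 0, i64* %p     ; Later covers bytes  [0, 8) -> OverwriteComplete
/// whereas a later store covering only bytes [2, 8) leaves the first two bytes
/// of the earlier store live and yields OverwriteEnd.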
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == MemoryLocation::UnknownSize ||
        Earlier.Size == MemoryLocation::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
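  //
  // For example (illustrative numbers only): EarlierOff = 4, Earlier.Size = 4,
  // LaterOff = 0, Later.Size = 8 gives (4 - 0) + 4 <= 8, so the earlier
  // [4, 8) write is completely covered by the later [0, 8) write.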
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store:
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  //
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;
482 bool MadeChange = false;
484 // Do a top-down walk on the BB.
485 for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
486 Instruction *Inst = BBI++;
488 // Handle 'free' calls specially.
489 if (CallInst *F = isFreeCall(Inst, TLI)) {
490 MadeChange |= HandleFree(F);
494 // If we find something that writes memory, get its memory dependence.
495 if (!hasMemoryWrite(Inst, *TLI))
498 MemDepResult InstDep = MD->getDependency(Inst);
500 // Ignore any store where we can't find a local dependence.
501 // FIXME: cross-block DSE would be fun. :)
502 if (!InstDep.isDef() && !InstDep.isClobber())
    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
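    //
    // For example (illustrative IR only):
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p    ; redundant; can be deleted if unordered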
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, *TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        const DataLayout &DL = BB.getModule()->getDataLayout();
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, *TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases; reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to optimize
          // as any store/memset/memcpy is likely using vector instructions, so
          // shortening it to a non-vector size is likely to be slower.
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');
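            // Trim the earlier write so that it ends where the later write
            // begins: the new length is the number of leading bytes of
            // DepWrite that the later store does not cover
            // (InstWriteOffset - DepWriteOffset, both offsets relative to the
            // common base pointer).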
            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to the set Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
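///
/// For example (illustrative C, not from the original source), both stores in:
///   p->x = 0;
///   p->y = 1;
///   free(p);
/// are dead because the object is freed immediately afterwards.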
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, *TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[2] = 0; // This could be dead too.
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);
  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
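  // (Note on the invariant maintained below: an object stays in
  // DeadStackObjects only as long as nothing between the current scan point
  // and the end of the block may read it, so any store whose underlying
  // objects are all still in the set is dead.)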
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, *TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (auto CS = CallSite(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }
    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}