//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DominatorTree &DT = getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT.isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = 0; MD = 0;
      return Changed;
    }
    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallPtrSet<Value*, 16> &DeadStackObjects);
    // getAnalysisUsage - Declare the analyses this pass requires and
    // preserves.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
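///
/// For example, deleting "store i32 0, i32* %gep" also deletes a
/// "getelementptr" that was only used to compute that store's address, since
/// zeroing the store's operands leaves the GEP trivially dead.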
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->erase(DeadInst);
  } while (!NowDeadInsts.empty());
}
/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with the other helpers below.
static bool hasMemoryWrite(Instruction *I) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  return false;
}
/// getLocForWrite - Return a Location stored to by the specified instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which write more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (II == 0) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (AA.getTargetData() == 0) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}
/// isRemovable - If the value of this instruction and the memory it writes to
/// are unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
  case Intrinsic::lifetime_end:
    // Never remove dead lifetime_end's, e.g. because it is followed by a
    // free call.
    return false;
  case Intrinsic::init_trampoline:
    // Always safe to remove init_trampoline.
    return true;

  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
    // Don't remove volatile memory intrinsics.
    return !cast<MemIntrinsic>(II)->isVolatile();
  }
}
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(false && "Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}
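/// getPointerSize - Return the allocated size in bytes of the stack object V
/// (an alloca or a byval argument), or UnknownSize if no TargetData is
/// available.  For example, "alloca i32, i32 10" is reported as 10 * 4 = 40
/// bytes on a target where i32 has an alloc size of 4 bytes.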
static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
  const TargetData *TD = AA.getTargetData();
  if (TD == 0)
    return AliasAnalysis::UnknownSize;

  if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    // Get size information for the alloca.
    if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
    return AliasAnalysis::UnknownSize;
  }

  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
  const PointerType *PT = cast<PointerType>(V->getType());
  return TD->getTypeAllocSize(PT->getElementType());
}
/// isCompleteOverwrite - Return true if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
                                const AliasAnalysis::Location &Earlier,
                                AliasAnalysis &AA) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store is at least as large as the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the
      // store is inferred from the pointee type.  If the two pointers have the
      // same type, then we know the earlier store is completely overwritten.
      if (AA.getTargetData() == 0)
        return Later.Ptr->getType() == Earlier.Ptr->getType();
      return false;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size < Earlier.Size)
      return false;
    return true;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      Later.Size <= Earlier.Size ||
      AA.getTargetData() == 0)
    return false;

  const TargetData &TD = *AA.getTargetData();

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose each pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  int64_t Off1 = 0, Off2 = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, Off1, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, Off2, TD);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return false;

  // Otherwise, we might have a situation like:
  //  store i16 -> P + 1 Byte
  //  store i32 -> P
  // In this case, we see if the later store completely overlaps all bytes
  // stored by the previous store.
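  // E.g. with Earlier = 2 bytes at P+4 and Later = 8 bytes at P, we have
  // Off1 = 4 >= Off2 = 0 and Off1 + 2 = 6 <= Off2 + 8 = 8, so the earlier
  // store is entirely covered and can be deleted.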
  if (Off1 < Off2 ||                            // Earlier starts before Later.
      Off1+Earlier.Size > Off2+Later.Size)      // Earlier goes beyond Later.
    return false;
  // Otherwise, we have complete overlap.
  return true;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
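// Overview of the per-block walk: for each instruction that writes memory, we
// ask MemoryDependenceAnalysis for the closest preceding write to the same
// location.  If that earlier write is removable and completely overwritten by
// the current one, it is deleted, and the search continues backwards past
// clobbers that only write (never read) the location.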
bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);
    // Ignore non-local store liveness.
    // FIXME: cross-block DSE would be fun. :)
    if (InstDep.isNonLocal() ||
        // Ignore self dependence, which happens in the entry block of the
        // function.
        InstDep.getInst() == Inst)
      continue;
    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
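    // For example, the store in
    //   %v = load i32* %P
    //   store i32 %v, i32* %P
    // is a no-op and can be deleted.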
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD);

          if (NextInst == 0)            // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())   // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }
    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;
    while (!InstDep.isNonLocal()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it has unknown size, bail out.
      if (DepLoc.Ptr == 0)
        break;
      // If we find a removable write that is completely obliterated by the
      // store to 'Loc' then we can remove it.
      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA)) {
        // Delete the store and now-dead instructions that feed it.
        DeleteDeadInstruction(DepWrite, *MD);
        ++NumFastStores;
        MadeChange = true;

        // DeleteDeadInstruction can delete the current instruction in loop
        // cases; reset BBI.
        BBI = Inst;
        if (BBI != BB.begin())
          --BBI;
        break;
      }
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;
      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }
  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
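///
/// For example, in
///   s->field = 0;
///   free(s);
/// the store to s->field is dead because the whole object is freed right
/// after it, so the store can be deleted.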
bool DSE::HandleFree(CallInst *F) {
  MemDepResult Dep = MD->getDependency(F);
  do {
    if (Dep.isNonLocal()) return false;

    Instruction *Dependency = Dep.getInst();
    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
      return false;

    Value *DepPointer =
      getStoredPointerOperand(Dependency)->getUnderlyingObject();

    // Check for aliasing.
    if (AA->alias(F->getArgOperand(0), 1, DepPointer, 1) !=
        AliasAnalysis::MustAlias)
      return false;

    // DCE instructions only used to calculate that store.
    DeleteDeadInstruction(Dependency, *MD);
    ++NumFastStores;

    // Inst's old Dependency is now deleted.  Compute the next dependency,
    // which may also be dead, as in
    //   s[0] = 0;
    //   s[1] = 0; // This has just been deleted.
    //   free(s);
    Dep = MD->getDependency(F);
  } while (!Dep.isNonLocal());

  return true;
}
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

  // Treat byval arguments the same; stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);
  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      Value *Pointer = getStoredPointerOperand(BBI)->getUnderlyingObject();

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        // DCE instructions only used to calculate that store.
        Instruction *Dead = BBI++;
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }
    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      unsigned NumModRef = 0, NumOther = 0;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // If we detect that our AA is imprecise, it's not worth it to scan the
        // rest of the DeadStackObjects set.  Just assume that the AA will
        // return ModRef for everything, and go ahead and bail out.
        if (NumModRef >= 16 && NumOther == 0)
          return MadeChange;

        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef)
          ++NumModRef;
        else
          ++NumOther;

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }
    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else {
      // Not a loading instruction.
      continue;
    }

    // Remove any allocas from the DeadStackObjects set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = LoadedLoc.Ptr->getUnderlyingObject();

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
    return;
  }
  SmallVector<Value*, 16> NowLive;
  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
       E = DeadStackObjects.end(); I != E; ++I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
    if (!AA->isNoAlias(StackLoc, LoadedLoc))
      NowLive.push_back(*I);
  }

  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
       I != E; ++I)
    DeadStackObjects.erase(*I);
}