//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");

// Try to kill dead allocas by walking through their uses until we see some
// use that could escape. This is a conservative analysis which tries to
// handle GEPs, bitcasts, stores, and no-op intrinsics. These tend to be the
// things left after inlining and SROA finish chewing on an alloca.
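//
// Illustrative IR (hypothetical, not from a test case) that this analysis
// considers dead: nothing loads from or escapes %a, so the alloca, the
// lifetime markers, and the store can all be removed.
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   store i8 0, i8* %p
//   call void @llvm.lifetime.end(i64 16, i8* %p)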
static Instruction *removeDeadAlloca(InstCombiner &IC, AllocaInst &AI) {
  SmallVector<Instruction *, 4> Worklist, DeadStores;
  Worklist.push_back(&AI);
  do {
    Instruction *PI = Worklist.pop_back_val();
    for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end();
         UI != UE; ++UI) {
      Instruction *I = cast<Instruction>(*UI);
      switch (I->getOpcode()) {
      default:
        // Give up the moment we see something we can't handle.
        return 0;

      case Instruction::GetElementPtr:
      case Instruction::BitCast:
        Worklist.push_back(I);
        continue;

      case Instruction::Call:
        // We can handle a limited subset of calls to no-op intrinsics.
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
          switch (II->getIntrinsicID()) {
          case Intrinsic::dbg_declare:
          case Intrinsic::dbg_value:
          case Intrinsic::invariant_start:
          case Intrinsic::invariant_end:
          case Intrinsic::lifetime_start:
          case Intrinsic::lifetime_end:
            continue;
          default:
            return 0;
          }
        }
        // Reject everything else.
        return 0;

      case Instruction::Store: {
        // Stores into the alloca are only live if the alloca is live.
        StoreInst *SI = cast<StoreInst>(I);
        // We can eliminate atomic stores, but not volatile.
        if (SI->isVolatile())
          return 0;
        // The store is only trivially safe if the pointer is the destination
        // as opposed to the value. We're conservative here and don't check for
        // the case where we store the address of a dead alloca into a dead
        // alloca.
        if (SI->getPointerOperand() != PI)
          return 0;
        DeadStores.push_back(I);
        continue;
      }
      }
    }
  } while (!Worklist.empty());

  // The alloca is dead. Kill off all the stores to it, and then replace it
  // with undef.
  while (!DeadStores.empty())
    IC.EraseInstFromFunction(*DeadStores.pop_back_val());
  return IC.ReplaceInstUsesWith(AI, UndefValue::get(AI.getType()));
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }
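
  // For example (illustrative only, assuming a target with 64-bit pointers):
  // "alloca i32, i8 %n" is rewritten so the count is widened first, roughly
  //   %n.ext = zext i8 %n to i64
  //   alloca i32, i64 %n.ext
  // The %n.ext name is hypothetical; CreateIntCast picks the actual name.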

  // Convert: 'alloca Ty, C' where C is a constant != 1 into 'alloca [C x Ty], 1'.
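  //
  // Illustrative IR (hypothetical names):
  //   %a = alloca i32, i64 4
  // becomes
  //   %a1 = alloca [4 x i32]
  //   %a1.sub = getelementptr inbounds [4 x i32]* %a1, i32 0, i32 0
  // with every use of %a rewritten to use %a1.sub.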
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
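    //
    // For instance (illustrative only), "%a = alloca {}" somewhere in the
    // function migrates to the entry block, and a later "%b = alloca [0 x i32]"
    // is then replaced by %a, via a bitcast since the two types differ.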
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign =
          std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
                   TD->getPrefTypeAlignment(AI.getAllocatedType()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Try to aggressively remove allocas which are only used for GEPs, lifetime
  // markers, and stores. This happens when SROA iteratively promotes stores
  // out of the alloca, and we need to clean up after it.
  return removeDeadAlloca(*this, AI);
}

/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
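///
/// Illustrative IR (hypothetical names; both types are 32 bits wide):
///   %c = bitcast <2 x i16>* %p to i32*
///   %v = load i32* %c
/// becomes
///   %w = load <2 x i16>* %p
///   %v = bitcast <2 x i16> %w to i32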
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed. Check to
      // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size. Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()), TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
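  //
  // For instance (illustrative only):
  //   store i32 %x, i32* %p
  //   %y = add i32 %x, 1
  //   %v = load i32* %p     ; forwarded: %v is replaced by %x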
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible. This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
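///
/// Illustrative IR (hypothetical names, assuming 64-bit pointers):
///   %c = bitcast i32** %p to i64*
///   store i64 %x, i64* %c
/// becomes
///   %x.c = inttoptr i64 %x to i32*
///   store i32* %x.c, i32** %p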
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element. This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed. Check to
  // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          return 0;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size. Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = SIOp0->getType();
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If we are storing to the first field of an aggregate, emit a GEP to
  // index into that first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
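  //
  // For instance (illustrative only):
  //   store i32 1, i32* %p
  //   %x = add i32 %a, %b
  //   store i32 %x, i32* %p   ; the earlier store is dead and is removed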
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the load is from the
    // pointer we're storing to and it produces the value being stored, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // store.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
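/// For instance (illustrative, hypothetical names), given stores in two
/// predecessors of a common successor:
///   then:  store i32 %v1, i32* %P
///   else:  store i32 %v2, i32* %P
/// both are sunk into the successor as:
///   merge: %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///          store i32 %storemerge, i32* %P
///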
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}