//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
using namespace PatternMatch;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
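  //
  // For example (illustrative value names): on a target with 64-bit pointers,
  //   %A = alloca i32, i8 %n
  // becomes
  //   %1 = zext i8 %n to i64
  //   %A = alloca i32, i64 %1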
  if (TD) {
    const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
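  //
  // For example (illustrative value names):
  //   %p = alloca i32, i32 4
  // becomes
  //   %p1 = alloca [4 x i32]
  //   %p1.sub = getelementptr inbounds [4 x i32]* %p1, i32 0, i32 0
  // and all uses of %p are replaced with %p1.sub.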
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                                   New->getName()+".sub", It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, V);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }
  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}

/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
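/// For example (illustrative IR):
///   %v = load i32* %c        ; where %c = bitcast <2 x i16>* %p to i32*
/// becomes
///   %w = load <2 x i16>* %p
///   %v = bitcast <2 x i16> %w to i32
/// provided both value types have the same size in bits.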
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
              IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
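  //
  // For example (illustrative), given
  //   store i32 %x, i32* %p
  //   %a = add i32 %x, 1
  //   %y = load i32* %p
  // the load is replaced by %x directly.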
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
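/// For example (illustrative IR, 32-bit pointers assumed):
///   store i32 %x, i32* %c    ; where %c = bitcast i8** %P to i32*
/// becomes
///   %x.c = inttoptr i32 %x to i8*
///   store i8* %x.c, i8** %P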
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type *CastSrcTy = SIOp0->getType();
  const Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If CastOp is a pointer to an aggregate and this is a store to its first
  // field, emit a GEP to index into that field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  return new StoreInst(NewCast, CastOp);
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
// If this instruction has two uses, one of which is a llvm.dbg.declare,
// return the llvm.dbg.declare.
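// For example (illustrative), an alloca whose only uses are a single store
// and a llvm.dbg.declare is returned as the declare here, so callers can
// treat the alloca as effectively having one real use.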
DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
  if (!V->hasNUses(2))
    return 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U))
      return DI;
    if (isa<BitCastInst>(U) && U->hasOneUse()) {
      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(*U->use_begin()))
        return DI;
    }
  }
  return 0;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead.  We must do this to prevent declares from affecting
  // codegen.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr))
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
          if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
            EraseInstFromFunction(*DI);
            return EraseInstFromFunction(SI);
          }
        }
      }
    }
    if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
      EraseInstFromFunction(*DI);
      return EraseInstFromFunction(SI);
    }
  }

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
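  //
  // For example (illustrative):
  //   store i32 %a, i32* %p
  //   %b = or i32 %a, 1
  //   store i32 %b, i32* %p
  // The first store is dead and is zapped here.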
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value we're storing and it comes from the pointer we're storing to, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // Attempt to narrow sequences where we load a wide value, perform bitmasks
  // that only affect the low bits of it, and then store it back.  This
  // typically arises from bitfield initializers in C++.
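  //
  // Worked example (illustrative): for
  //   store i32 (and (or (load i32* %p), 0xAB), 0xFFFFFF0F), i32* %p
  // OrMask has 24 leading zeros and AndMask has 24 leading ones, so the top
  // 24 bits are unmodified, Prefix = 24, NewWidth = 32 - 24 = 8, and the
  // store can be narrowed to an i8 store of the truncated value.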
  ConstantInt *CI1 = 0, *CI2 = 0;
  Value *Ld = 0;
  if (getTargetData() &&
      match(SI.getValueOperand(),
            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
      isa<LoadInst>(Ld) &&
      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
    APInt OrMask = CI1->getValue();
    APInt AndMask = CI2->getValue();

    // Compute the prefix of the value that is unmodified by the bitmasking.
    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
    while (NewWidth < AndMask.getBitWidth() &&
           getTargetData()->isIllegalInteger(NewWidth))
      NewWidth = NextPowerOf2(NewWidth);

    // If we can find a power-of-two prefix (and if the values we're working
    // with are themselves power-of-two widths), then we can narrow the store.
    // We rely on later iterations of instcombine to propagate the demanded
    // bits to narrow the other computations in the chain.
    if (NewWidth < AndMask.getBitWidth() &&
        getTargetData()->isLegalInteger(NewWidth)) {
      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
      const Type *NewPtrType = PointerType::getUnqual(NewType);

      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);

      // On big endian targets, we need to offset from the original pointer
      // in order to store to the low-bit suffix.
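      //
      // e.g. narrowing an i32 store to i8 on a big endian target stores at
      // byte offset (32 - 8) / 8 = 3 from the original pointer.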
      if (getTargetData()->isBigEndian()) {
        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
      }

      return new StoreInst(NewVal, NewPtr);
    }
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or if the
    // alignments differ, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        OtherStore->getAlignment() != SI.getAlignment())
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            OtherStore->getAlignment() != SI.getAlignment())
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
    PN->reserveOperandSpace(2);
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile(),
                                    SI.getAlignment()), *BBI);

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}