//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form.  In cases that this kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
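//
// As a motivating example (illustrative; the exact call emitted depends on the
// target's available library functions), a loop like
//
//   for (i = 0; i != n; ++i)
//     A[i] = 0;
//
// becomes a single call: memset(A, 0, n * sizeof(A[0])).
//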
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set.  It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// We should enhance the memset/memcpy recognition to handle multiple stores in
// the loop.  This would handle things like:
//   void foo(_Complex float *P)
//     for (i) { __real__(*P) = 0;  __imag__(*P) = 0; }
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-idiom"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
namespace {
  class LoopIdiomRecognize : public LoopPass {
    Loop *CurLoop;
    const TargetData *TD;
    DominatorTree *DT;
    ScalarEvolution *SE;
    TargetLibraryInfo *TLI;
  public:
    static char ID;
    explicit LoopIdiomRecognize() : LoopPass(ID) {
      initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);
    bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                        SmallVectorImpl<BasicBlock*> &ExitBlocks);

    bool processLoopStore(StoreInst *SI, const SCEV *BECount);
    bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

    bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                                 unsigned StoreAlignment,
                                 Value *SplatValue, Instruction *TheStore,
                                 const SCEVAddRecExpr *Ev,
                                 const SCEV *BECount);
    bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                                    const SCEVAddRecExpr *StoreEv,
                                    const SCEVAddRecExpr *LoadEv,
                                    const SCEV *BECount);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addPreserved<DominatorTree>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetLibraryInfo>();
    }
  };
}
char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                    false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
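
// A minimal usage sketch (assuming the standard opt driver): the pass can be
// exercised in isolation with "opt -loop-idiom -S input.ll"; the required
// dependencies above ensure loops are in simplified/LCSSA form first.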
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
static void DeleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);

  // Before we touch this instruction, remove it from SE!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // SCEV.
    SE.forgetValue(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
}
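
// For example, if the instruction being zapped is "store i32 %v, i32* %gep"
// and %gep is a getelementptr used only by that store, zeroing the store's
// operands makes %gep trivially dead, so it is deleted as well, and so on up
// the feeding computation tree.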
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  CurLoop = L;

  // The trip count of the loop must be analyzable.
  SE = &getAnalysis<ScalarEvolution>();
  if (!SE->hasLoopInvariantBackedgeTakenCount(L))
    return false;
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BECount)) return false;

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getValue()->getValue() == 0)
      return false;

  // We require target data for now.
  TD = getAnalysisIfAvailable<TargetData>();
  if (TD == 0) return false;

  DT = &getAnalysis<DominatorTree>();
  LoopInfo &LI = getAnalysis<LoopInfo>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << L->getHeader()->getParent()->getName()
               << "] Loop %" << L->getHeader()->getName() << "\n");

  bool MadeChange = false;
  // Scan all the blocks in the loop that are not in subloops.
  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI) {
    // Ignore blocks in subloops.
    if (LI.getLoopFor(*BI) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
  }
  return MadeChange;
}
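
// For instance (illustrative, ignoring the n == 0 corner case), for
// "for (i = 0; i != n; ++i) ..." ScalarEvolution reports a backedge-taken
// count of n-1, which is loop-invariant, so the loop is a candidate here.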
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count.  This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                                     SmallVectorImpl<BasicBlock*> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop.  For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop.  Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    // Look for store instructions, which may be optimized to memset/memcpy.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopStore(SI, BECount)) continue;
      MadeChange = true;
      // If processing the store invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }

    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopMemSet(MSI, BECount)) continue;
      MadeChange = true;
      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}
/// processLoopStore - See if this store can be promoted to a memset or memcpy.
bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
  if (SI->isVolatile()) return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
    dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if the stride matches the size of the store.  If so, then we
  // know that every byte is touched in the loop.
  unsigned StoreSize = (unsigned)SizeInBits >> 3;
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));

  // TODO: Could also handle negative stride here someday, that will require
  // the validity check in mayLoopAccessLocation to be updated though.
  if (Stride == 0 || StoreSize != Stride->getValue()->getValue())
    return false;

  // See if we can optimize just this store in isolation.
  if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
                              StoredVal, SI, StoreEv, BECount))
    return true;

  // If the stored value is a strided load in the same loop with the same
  // stride, this may be transformable into a memcpy.  This kicks in for stuff
  // like
  //   for (i) A[i] = B[i];
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    const SCEVAddRecExpr *LoadEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
    if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
        StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
      if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
        return true;
  }

  //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";
  return false;
}
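
// As a concrete instance of the stride check above (illustrative values): for
// "for (i) A[i] = 1;" with A of type i32*, the store pointer's SCEV is an
// AddRec like {A,+,4} and StoreSize is 4 bytes, so consecutive iterations
// touch adjacent memory and the store qualifies as a dense strided store.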
/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::
processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;

  // If we're not allowed to hack on memset, we fail.
  if (!TLI->has(LibFunc::memset))
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset.  If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));

  // TODO: Could also handle negative stride here someday, that will require
  // the validity check in mayLoopAccessLocation to be updated though.
  if (Stride == 0 || MSI->getLength() != Stride->getValue())
    return false;

  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), MSI->getValue(),
                                 MSI, Ev, BECount);
}
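
// For example (a sketch; the array shape is illustrative), a loop doing
// "for (i) memset(&A[i][0], 0, 100);" over the rows of "char A[n][100]"
// advances the destination by 100 bytes per iteration, matching the memset
// length, so the row-by-row memsets can be widened into one memset of
// 100*n bytes.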
/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access.  The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
                                  Loop *L, const SCEV *BECount,
                                  unsigned StoreSize, AliasAnalysis &AA,
                                  Instruction *IgnoredStore) {
  // Get the location that may be stored across the loop.  Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  uint64_t AccessSize = AliasAnalysis::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize;

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store.  Store to &A[i] of 100 will always return may alias
  // with store of &A[100], we need StoreLoc to be "A" with size of 100,
  // which will then no-alias a store to &A[100].
  AliasAnalysis::Location StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
      if (&*I != IgnoredStore &&
          (AA.getModRefInfo(I, StoreLoc) & Access))
        return true;

  return false;
}
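
// Worked example of the refined access size (illustrative numbers): if the
// backedge is taken 99 times (100 iterations) and each iteration stores 4
// bytes, the loop writes exactly (99+1)*4 = 400 bytes starting at Ptr.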
/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in.  Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array.  We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (C == 0) return 0;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = TD.getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size-1)))
    return 0;

  // Convert the constant to an integer type of the appropriate size so we can
  // start hacking on it.
  if (isa<PointerType>(V->getType()))
    C = ConstantExpr::getPtrToInt(C, IntegerType::get(C->getContext(), Size));
  else if (isa<VectorType>(V->getType()) || V->getType()->isFloatingPointTy())
    C = ConstantExpr::getBitCast(C, IntegerType::get(C->getContext(), Size));
  else if (!isa<IntegerType>(V->getType()))
    return 0;  // Unhandled type.

  // Convert to size in bytes.
  Size /= 8;

  // If we couldn't fold this to an integer, we fail.  We don't bother to handle
  // relocatable expressions like the address of a global yet.
  ConstantInt *CI = dyn_cast<ConstantInt>(C);
  if (CI == 0) return 0;

  APInt CVal = CI->getValue();

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same.
  if (Size > 16) return 0;

  // If this is a big endian target (PPC) then we need to bswap.
  if (TD.isBigEndian())
    CVal = CVal.byteSwap();

  // Determine what each byte of the pattern value should be.
  char Value[16];
  for (unsigned i = 0; i != 16; ++i) {
    // Get the byte value we're indexing into.
    unsigned CByte = i % Size;
    Value[i] = (unsigned char)(CVal.getZExtValue() >> (CByte*8));
  }

  return ConstantArray::get(V->getContext(), StringRef(Value, 16), false);
}
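
// Worked example (illustrative): storing the i32 constant 0x01020304 on a
// little-endian target yields the pattern bytes 04 03 02 01 repeated four
// times, i.e. the 16-byte memory image that memset_pattern16 should splat.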
/// processLoopStridedStore - We see a strided store of some value.  If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::
processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                        unsigned StoreAlignment, Value *StoredVal,
                        Instruction *TheStore, const SCEVAddRecExpr *Ev,
                        const SCEV *BECount) {

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored.  A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = 0;

  // If we're allowed to form a memset, and the stored value would be acceptable
  // for memset, use it.
  if (SplatValue && TLI->has(LibFunc::memset) &&
      // Verify that the stored value is loop invariant.  If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // Keep and use SplatValue.
    PatternValue = 0;
  } else if (TLI->has(LibFunc::memset_pattern16) &&
             (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
    // It looks like we can use PatternValue!
    SplatValue = 0;
  } else {
    // Otherwise, this isn't an idiom we can transform.  For example, we can't
    // do anything with a 3-byte store.
    return false;
  }

  // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
  // this into a memset in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location.  Check for an alias.
  if (mayLoopAccessLocation(DestPtr, AliasAnalysis::ModRef,
                            CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), TheStore))
    return false;

  // Okay, everything looks good, insert the memset.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  Just insert code for it in the preheader.
  SCEVExpander Expander(*SE);

  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
  Value *BasePtr =
    Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  Value *NewCall;
  if (SplatValue)
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   StoreAlignment);
  else {
    Module *M = TheStore->getParent()->getParent()->getParent();
    Value *MSP = M->getOrInsertFunction("memset_pattern16",
                                        Builder.getVoidTy(),
                                        Builder.getInt8PtrTy(),
                                        Builder.getInt8PtrTy(), IntPtr,
                                        (void*)0);

    // Otherwise we should form a memset_pattern16.  PatternValue is known to be
    // a constant array of 16-bytes.  Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::InternalLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(true); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = Builder.CreateConstInBoundsGEP2_32(GV, 0, 0, "pattern");

    NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");

  // Okay, the memset has been formed.  Zap the original store and anything that
  // feeds into it.
  DeleteDeadInstruction(TheStore, *SE);
  ++NumMemSet;
  return true;
}
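
// The emitted call for the pattern case looks roughly like (illustrative IR):
//   call void @memset_pattern16(i8* %basePtr, i8* %pattern, i64 %numBytes)
// where %pattern points at the 16-byte global built from PatternValue above.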
/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
/// same-strided load.
bool LoopIdiomRecognize::
processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                           const SCEVAddRecExpr *StoreEv,
                           const SCEVAddRecExpr *LoadEv,
                           const SCEV *BECount) {
  // If we're not allowed to form memcpy, we fail.
  if (!TLI->has(LibFunc::memcpy))
    return false;

  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());

  // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
  // this into a memcpy in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the stored location (including the load feeding the stores).
  // Check for an alias.
  if (mayLoopAccessLocation(SI->getPointerOperand(), AliasAnalysis::ModRef,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  if (mayLoopAccessLocation(LI->getPointerOperand(), AliasAnalysis::Mod,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // Okay, everything looks good, insert the memcpy.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  Just insert code for it in the preheader.
  SCEVExpander Expander(*SE);

  Value *LoadBasePtr =
    Expander.expandCodeFor(LoadEv->getStart(),
                           Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
                           Preheader->getTerminator());
  Value *StoreBasePtr =
    Expander.expandCodeFor(StoreEv->getStart(),
                           Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(SI->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  Value *NewCall =
    Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                         std::min(SI->getAlignment(), LI->getAlignment()));

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed.  Zap the original store and anything that
  // feeds into it.
  DeleteDeadInstruction(SI, *SE);
  ++NumMemCpy;
  return true;
}
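
// For example, "for (i) A[i] = B[i];" over disjoint i32 arrays becomes,
// roughly in C terms, memcpy(A, B, 4*n).  Note the asymmetry in the two alias
// checks above: the stored location must be free of any other read or write
// in the loop (ModRef), while the loaded location only needs to be free of
// writes (Mod); other reads of the source array are harmless.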