//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
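//
// Specifically, it currently performs these transformations:
//   * merging neighboring byte-splattable stores into memset calls
//   * memmove -> memcpy when the source and destination provably do not alias
//   * forwarding one memcpy through another, exposing the first copy to dead
//     store elimination
//   * call slot optimization: making a call write its result directly into
//     the destination of a subsequent memcpy
//   * forwarding the source of a memcpy to a byval call argument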
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
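
/// GetOffsetFromIndex - Compute the constant byte offset implied by the GEP
/// indices starting at operand Idx, e.g. for
///   getelementptr [10 x i32]* %A, i32 0, i32 4
/// with Idx == 1 the result is 0*40 + 4*4 == 16 bytes.  If a non-constant
/// index is encountered, VariableIdxFound is set and the returned offset is
/// meaningless.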
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const TargetData &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42] and Ptr2 might be &A[40]; with 4-byte elements the offset would
/// be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  Beyond those, they may differ only by a
  // constant offset, which determines their distance from each other.  We
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+2
///   store 0 -> P+3
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2), the third extends it to [0, 3), and the fourth extends it to
/// [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span that these stores cover.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
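
// Worked example of the heuristic above: with a 4-byte pointer size, four
// adjacent i8 stores cover Bytes == 4, giving NumPointerStores == 1 and
// NumByteStores == 0; since 4 stores > 1, lowering the range to memset is
// profitable.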

namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const TargetData &TD;
public:
  MemsetRanges(const TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    addStore(OffsetFromFirst, cast<StoreInst>(Inst));
  }
};
} // end anon namespace

/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    const TargetData *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
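/// For example, four adjacent stores of the same splattable byte value:
///   store i8 0, i8* %P
///   store i8 0, i8* %P1   ; %P1 .. %P3 are at offsets 1..3 from %P
///   store i8 0, i8* %P2
///   store i8 0, i8* %P3
/// can be merged into a single llvm.memset of %P with length 4.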
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Okay, so we now have a single store with a byte-splattable value.  Scan to
  // find all subsequent stores of the same value at constant offsets from the
  // same pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memset(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
          AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instructions needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
  }

  return AMemSet;
}

bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
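/// For example, given:
///   memcpy(b <- a)
///   memcpy(c <- b)
/// the second memcpy is rewritten to copy 'c' directly from 'a', so the
/// intermediate copy through 'b' becomes removable by later passes.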
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1) return false;
  if (C1->getValue().getZExtValue() < MSize)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// processMemCpy - Perform simplification of memcpys.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpys that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      M->eraseFromParent();
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
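/// For example, a call
///   llvm.memmove(i8* %dst, i8* %src, i64 %len, ...)
/// is rewritten in place into the corresponding llvm.memcpy once alias
/// analysis proves that %dst and %src cannot overlap.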
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // See if the source and dest pointers alias; if they provably do not, we can
  // transform this memmove into a memcpy.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
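/// If the argument is fed by a memcpy, e.g.
///   memcpy(%tmp <- %src, size)
///   call void @foo(%T* byval %tmp)
/// we try to pass %src to the call directly, making the memcpy and the
/// temporary dead.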
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be greater than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If it is greater than the memcpy, then we
  // can't do the substitution.  If the call doesn't specify the alignment, then
  // it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // All checks passed, so update the byval argument to use the memcpy source.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) {
        RepeatInstruction = processMemCpy(M);
      } else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        RepeatInstruction = processMemMove(M);
      } else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}