//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
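      // For example, i32 0xA0A0A0A0 splits into equal halves 0xA0A0/0xA0A0,
      // and each of those into 0xA0/0xA0, so it splats the byte 0xA0, whereas
      // i16 0x1234 is rejected at the first split (0x12 != 0x34).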
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

/// GetOffsetFromIndex - Compute the byte offset implied by GEP operands
/// [Idx, NumOperands).  If any of those indices is non-constant, set
/// VariableIdxFound and give up.
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
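
// For example, given "getelementptr [8 x i32]* %A, i32 0, i32 2" and Idx = 2,
// GetOffsetFromIndex computes 2 * sizeof(i32) = 8 bytes; a non-constant index
// would set VariableIdxFound instead.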

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, if Ptr1 is
/// &A[42] and Ptr2 is &A[40], the offset would be -8 (assuming 4-byte
/// elements).
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  Beyond that point, each may have some
  // trailing constant indices, whose difference determines the offset between
  // the two pointers.  We handle no other cases.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

namespace {
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+2
///   store 0 -> P+3
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32,
  // etc.
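  // For example, with 4-byte pointers, four i8 stores covering [0, 4) give
  // NumPointerStores = 1 and NumByteStores = 0; since 4 > 1, we use memset.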
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {
/// MemsetRanges - A sorted collection of the memset ranges we have inferred,
/// kept disjoint by merging overlapping ranges in addStore.
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};
} // end anon namespace

/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I; add it to I's store list.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, the store has already been recorded, so just return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise our
  // scan would have stopped on *that* range.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End, merging in any following ranges we now overlap.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
    }

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
} // end anon namespace

// createMemCpyOptPass - The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// processStore - When GVN is scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
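/// For example, a sequence of byte stores like:
///   store i8 0, i8* %P
///   store i8 0, i8* %P1    ; P+1
///   store i8 0, i8* %P2    ; P+2
///   store i8 0, i8* %P3    ; P+3
/// can be merged into a single call along the lines of:
///   call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 4, i32 1, i1 false)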
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is memset'able one byte at a time,
  // like "0" or "-1" of any width, as well as things like 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store of a splatable value.  Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    // Cast the start ptr to be i8* as memset requires.
    const PointerType *StartPTy = cast<PointerType>(StartPtr->getType());
    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
                                                  StartPTy->getAddressSpace());
    if (StartPTy != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,   // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Alignment),
      // volatile
      ConstantInt::getFalse(Context),
    };
    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };

    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);

    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *C << '\n');
    (void)C;  // C is only referenced inside DEBUG; avoid an unused warning.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}

/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
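/// For example:
///   memcpy(B <- A)       ; MDep
///   memcpy(C <- B)       ; M
/// can, when the checks below succeed, be rewritten to:
///   memcpy(B <- A)
///   memcpy(C <- A)
/// (or a memmove if C might alias A), exposing the first copy as a candidate
/// for dead store elimination.
///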
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1) return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  if (DepSize < MSize)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second memcpy might alias the source of the first, then
  // the source and dest might overlap.  We still want to eliminate the
  // intermediate value, but we have to generate a memmove instead of memcpy.
  Intrinsic::ID ResultFn = Intrinsic::memcpy;
  if (!AA.isNoAlias(M->getRawDest(), MSize, MDep->getRawSource(), DepSize))
    ResultFn = Intrinsic::memmove;

  // If all checks passed, then we can transform M.
  const Type *ArgTys[3] = {
    M->getRawDest()->getType(),
    MDep->getRawSource()->getType(),
    M->getLength()->getType()
  };
  Function *MemCpyFun =
    Intrinsic::getDeclaration(MDep->getParent()->getParent()->getParent(),
                              ResultFn, ArgTys, 3);

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
  Value *Args[5] = {
    M->getRawDest(),
    MDep->getRawSource(),
    M->getLength(),
    ConstantInt::get(Type::getInt32Ty(MemCpyFun->getContext()), Align),
    M->getVolatileCst()
  };
  CallInst::Create(MemCpyFun, Args, Args+5, "", M);

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// processMemCpy - Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // There are two possible optimizations we can do for memcpy:
  //   a) The memcpy-memcpy xform, which exposes redundancy for DSE.
  //   b) The call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    bool changed = performCallSlotOptzn(M, M->getDest(), M->getSource(),
                                        CopySize->getZExtValue(), C);
    if (changed) M->eraseFromParent();
    return changed;
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = AliasAnalysis::UnknownSize;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // The pointers provably don't alias, so we can transform this into a memcpy.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
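/// If the argument is fed by a memcpy, e.g.:
///   memcpy(%tmp <- %src)
///   call void @foo(byval %tmp)
/// then, when the checks below succeed, we can pass the source directly:
///   call void @foo(byval %src)
/// often leaving the memcpy and the temporary dead for later passes to remove.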
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =
    cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If it is greater than the memcpy, then we
  // can't do the substitution.  If the call doesn't specify the alignment, then
  // it is some target-specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) {
        RepeatInstruction = processMemCpy(M);
      } else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        RepeatInstruction = processMemMove(M);
      } else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}