X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FMemCpyOptimizer.cpp;h=a87cce3f9d3eda7dee5573979a3c19754c13c98a;hp=7ed85889a05ccfa455429bf3e5d0a9b4e0ecd83e;hb=b83a67e1e3fe210bd99a82eccd3dc5b1b44f1503;hpb=d13db2c59cc94162d6cf0a04187d408bfef6d4a7

diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 7ed85889a05..a87cce3f9d3 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -14,80 +14,32 @@
 #define DEBUG_TYPE "memcpyopt"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/GlobalVariable.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/IRBuilder.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
 #include <list>
 using namespace llvm;
 
 STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
 STATISTIC(NumMemSetInfer, "Number of memsets inferred");
 STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
-
-/// isBytewiseValue - If the specified value can be set by repeating the same
-/// byte in memory, return the i8 value that it is represented with.  This is
-/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
-/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
-/// byte store (e.g. i16 0x1234), return null.
-static Value *isBytewiseValue(Value *V) {
-  LLVMContext &Context = V->getContext();
-
-  // All byte-wide stores are splatable, even of arbitrary variables.
-  if (V->getType()->isIntegerTy(8)) return V;
-
-  // Constant float and double values can be handled as integer values if the
-  // corresponding integer value is "byteable".  An important case is 0.0.
-  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
-    if (CFP->getType()->isFloatTy())
-      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
-    if (CFP->getType()->isDoubleTy())
-      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
-    // Don't handle long double formats, which have strange constraints.
-  }
-
-  // We can handle constant integers that are power of two in size and a
-  // multiple of 8 bits.
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-    unsigned Width = CI->getBitWidth();
-    if (isPowerOf2_32(Width) && Width > 8) {
-      // We can handle this value if the recursive binary decomposition is the
-      // same at all levels.
-      APInt Val = CI->getValue();
-      APInt Val2;
-      while (Val.getBitWidth() != 8) {
-        unsigned NextWidth = Val.getBitWidth()/2;
-        Val2 = Val.lshr(NextWidth);
-        Val2.trunc(Val.getBitWidth()/2);
-        Val.trunc(Val.getBitWidth()/2);
-
-        // If the top/bottom halves aren't the same, reject it.
-        if (Val != Val2)
-          return 0;
-      }
-      return ConstantInt::get(Context, Val);
-    }
-  }
-
-  // Conceptually, we could handle things like:
-  //   %a = zext i8 %X to i16
-  //   %b = shl i16 %a, 8
-  //   %c = or i16 %a, %b
-  // but until there is an example that actually needs this, it doesn't seem
-  // worth worrying about.
-  return 0;
-}
+STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
 
 static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
-                                  bool &VariableIdxFound, TargetData &TD) {
+                                  bool &VariableIdxFound, const TargetData &TD){
   // Skip over the first indices.
   gep_type_iterator GTI = gep_type_begin(GEP);
   for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -102,7 +54,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
     if (OpC->isZero()) continue;  // No offset.
 
     // Handle struct indices, which add their field offset to the pointer.
-    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }
@@ -120,14 +72,31 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
 /// constant offset, and return that constant offset.  For example, Ptr1 might
 /// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
 static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
-                            TargetData &TD) {
+                            const TargetData &TD) {
+  Ptr1 = Ptr1->stripPointerCasts();
+  Ptr2 = Ptr2->stripPointerCasts();
+  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
+  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
+
+  bool VariableIdxFound = false;
+
+  // If one pointer is a GEP and the other isn't, then see if the GEP is a
+  // constant offset from the base, as in "P" and "gep P, 1".
+  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
+    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
+    return !VariableIdxFound;
+  }
+
+  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
+    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
+    return !VariableIdxFound;
+  }
+
   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
   // base.  After that base, they may have some number of common (and
   // potentially variable) indices.  After that they handle some constant
   // offset, which determines their offset from each other.  At this point, we
   // handle no other case.
-  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
-  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
     return false;
 
@@ -137,7 +106,6 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
       break;
 
-  bool VariableIdxFound = false;
   int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
   int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
   if (VariableIdxFound) return false;
@@ -171,7 +139,7 @@ struct MemsetRange {
   unsigned Alignment;
 
   /// TheStores - The actual stores that make up this range.
-  SmallVector<StoreInst*, 16> TheStores;
+  SmallVector<Instruction*, 16> TheStores;
 
   bool isProfitableToUseMemset(const TargetData &TD) const;
 
@@ -179,12 +147,21 @@ struct MemsetRange {
 } // end anon namespace
 
 bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
-  // If we found more than 8 stores to merge or 64 bytes, use memset.
-  if (TheStores.size() >= 8 || End-Start >= 64) return true;
+  // If we found more than 4 stores to merge or 16 bytes, use memset.
+  if (TheStores.size() >= 4 || End-Start >= 16) return true;
+
+  // If there is nothing to merge, don't do anything.
+  if (TheStores.size() < 2) return false;
+
+  // If any of the stores are a memset, then it is always good to extend the
+  // memset.
+  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
+    if (!isa<StoreInst>(TheStores[i]))
+      return true;
 
   // Assume that the code generator is capable of merging pairs of stores
   // together if it wants to.
-  if (TheStores.size() <= 2) return false;
+  if (TheStores.size() == 2) return false;
 
   // If we have fewer than 8 stores, it can still be worthwhile to do this.
   // For example, merging 4 i8 stores into an i32 store is useful almost always.
@@ -215,31 +192,53 @@ class MemsetRanges {
   /// because each element is relatively large and expensive to copy.
   std::list<MemsetRange> Ranges;
   typedef std::list<MemsetRange>::iterator range_iterator;
-  TargetData &TD;
+  const TargetData &TD;
 public:
-  MemsetRanges(TargetData &td) : TD(td) {}
+  MemsetRanges(const TargetData &td) : TD(td) {}
 
   typedef std::list<MemsetRange>::const_iterator const_iterator;
   const_iterator begin() const { return Ranges.begin(); }
   const_iterator end() const { return Ranges.end(); }
   bool empty() const { return Ranges.empty(); }
 
-  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
+  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
+    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+      addStore(OffsetFromFirst, SI);
+    else
+      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
+  }
+
+  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
+    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+
+    addRange(OffsetFromFirst, StoreSize,
+             SI->getPointerOperand(), SI->getAlignment(), SI);
+  }
+
+  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
+    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
+  }
+
+  void addRange(int64_t Start, int64_t Size, Value *Ptr,
+                unsigned Alignment, Instruction *Inst);
+
 };
 
 } // end anon namespace
 
 
-/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
+/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
 /// new range for the specified store at the specified offset, merging into
 /// existing ranges as appropriate.
-void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
-  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());
-
-  // Do a linear search of the ranges to see if this can be joined and/or to
-  // find the insertion point in the list.  We keep the ranges sorted for
-  // simplicity here.  This is a linear search of a linked list, which is ugly,
-  // however the number of ranges is limited, so this won't get crazy slow.
+///
+/// Do a linear search of the ranges to see if this can be joined and/or to
+/// find the insertion point in the list.  We keep the ranges sorted for
+/// simplicity here.  This is a linear search of a linked list, which is ugly,
+/// however the number of ranges is limited, so this won't get crazy slow.
+void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
+                            unsigned Alignment, Instruction *Inst) {
+  int64_t End = Start+Size;
   range_iterator I = Ranges.begin(), E = Ranges.end();
 
   while (I != E && Start > I->End)
@@ -252,14 +251,14 @@ void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
     MemsetRange &R = *Ranges.insert(I, MemsetRange());
     R.Start = Start;
     R.End = End;
-    R.StartPtr = SI->getPointerOperand();
-    R.Alignment = SI->getAlignment();
-    R.TheStores.push_back(SI);
+    R.StartPtr = Ptr;
+    R.Alignment = Alignment;
+    R.TheStores.push_back(Inst);
     return;
   }
-
+
   // This store overlaps with I, add it.
-  I->TheStores.push_back(SI);
+  I->TheStores.push_back(Inst);
 
   // At this point, we may have an interval that completely contains our store.
   // If so, just add it to the interval and return.
@@ -274,8 +273,8 @@ void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
   // stopped on *it*.
   if (Start < I->Start) {
     I->Start = Start;
-    I->StartPtr = SI->getPointerOperand();
-    I->Alignment = SI->getAlignment();
+    I->StartPtr = Ptr;
+    I->Alignment = Alignment;
   }
 
   // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
@@ -301,10 +300,19 @@ namespace {
   class MemCpyOpt : public FunctionPass {
-    bool runOnFunction(Function &F);
+    MemoryDependenceAnalysis *MD;
+    TargetLibraryInfo *TLI;
+    const TargetData *TD;
   public:
     static char ID; // Pass identification, replacement for typeid
-    MemCpyOpt() : FunctionPass(&ID) {}
+    MemCpyOpt() : FunctionPass(ID) {
+      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
+      MD = 0;
+      TLI = 0;
+      TD = 0;
+    }
+
+    bool runOnFunction(Function &F);
 
   private:
     // This transformation requires dominator postdominator info
@@ -313,15 +321,24 @@ namespace {
       AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
+      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
     }
 
     // Helper fuctions
     bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
+    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
     bool processMemCpy(MemCpyInst *M);
     bool processMemMove(MemMoveInst *M);
-    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
+    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
+                              uint64_t cpyLen, CallInst *C);
+    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
+                                       uint64_t MSize);
+    bool processByValArgument(CallSite CS, unsigned ArgNo);
+    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
+                                      Value *ByteVal);
+
     bool iterateOnFunction(Function &F);
   };
 
@@ -331,165 +348,218 @@ namespace {
 // createMemCpyOptPass - The public interface to this file...
 FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
 
-INITIALIZE_PASS(MemCpyOpt, "memcpyopt", "MemCpy Optimization", false, false);
-
+INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
+                    false, false)
 
-
-/// processStore - When GVN is scanning forward over instructions, we look for
+/// tryMergingIntoMemset - When scanning forward over instructions, we look for
 /// some other patterns to fold away.  In particular, this looks for stores to
-/// neighboring locations of memory.  If it sees enough consequtive ones
-/// (currently 4) it attempts to merge them together into a memcpy/memset.
-bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
-  if (SI->isVolatile()) return false;
-
-  LLVMContext &Context = SI->getContext();
-
-  // There are two cases that are interesting for this code to handle: memcpy
-  // and memset.  Right now we only handle memset.
+/// neighboring locations of memory.  If it sees enough consecutive ones, it
+/// attempts to merge them together into a memcpy/memset.
+Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
+                                             Value *StartPtr, Value *ByteVal) {
+  if (TD == 0) return 0;
 
-  // Ensure that the value being stored is something that can be memset'able a
-  // byte at a time like "0" or "-1" or any width, as well as things like
-  // 0xA0A0A0A0 and 0.0.
-  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
-  if (!ByteVal)
-    return false;
-
-  TargetData *TD = getAnalysisIfAvailable<TargetData>();
-  if (!TD) return false;
-  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-  Module *M = SI->getParent()->getParent()->getParent();
-
   // Okay, so we now have a single store that can be splatable.  Scan to find
   // all subsequent stores of the same value to offset from the same pointer.
   // Join these together into ranges, so we can decide whether contiguous blocks
   // are stored.
   MemsetRanges Ranges(*TD);
-  Value *StartPtr = SI->getPointerOperand();
-
-  BasicBlock::iterator BI = SI;
+  BasicBlock::iterator BI = StartInst;
   for (++BI; !isa<TerminatorInst>(BI); ++BI) {
-    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
-      // If the call is readnone, ignore it, otherwise bail out.  We don't even
-      // allow readonly here because we don't want something like:
+    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
+      // If the instruction is readnone, ignore it, otherwise bail out.  We
+      // don't even allow readonly here because we don't want something like:
       // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
-      if (AA.getModRefBehavior(CallSite::get(BI)) ==
-            AliasAnalysis::DoesNotAccessMemory)
-        continue;
-
-      // TODO: If this is a memset, try to join it in.
-
-      break;
-    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
-      break;
-
-    // If this is a non-store instruction it is fine, ignore it.
-    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
-    if (NextStore == 0) continue;
+      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
+        break;
+      continue;
+    }
 
-    // If this is a store, see if we can merge it in.
-    if (NextStore->isVolatile()) break;
+    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
+      // If this is a store, see if we can merge it in.
+      if (!NextStore->isSimple()) break;
 
-    // Check to see if this stored value is of the same byte-splattable value.
-    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
-      break;
-
-    // Check to see if this store is to a constant offset from the start ptr.
-    int64_t Offset;
-    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
-      break;
-
-    Ranges.addStore(Offset, NextStore);
+      // Check to see if this stored value is of the same byte-splattable value.
+      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
+        break;
+
+      // Check to see if this store is to a constant offset from the start ptr.
+      int64_t Offset;
+      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
+                           Offset, *TD))
+        break;
+
+      Ranges.addStore(Offset, NextStore);
+    } else {
+      MemSetInst *MSI = cast<MemSetInst>(BI);
+
+      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
+          !isa<ConstantInt>(MSI->getLength()))
+        break;
+
+      // Check to see if this store is to a constant offset from the start ptr.
+      int64_t Offset;
+      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
+        break;
+
+      Ranges.addMemSet(Offset, MSI);
+    }
   }
 
   // If we have no ranges, then we just had a single store with nothing that
   // could be merged in.  This is a very common case of course.
   if (Ranges.empty())
-    return false;
+    return 0;
 
   // If we had at least one store that could be merged in, add the starting
   // store as well.  We try to avoid this unless there is at least something
   // interesting as a small compile-time optimization.
-  Ranges.addStore(0, SI);
-
-
+  Ranges.addInst(0, StartInst);
+
+  // If we create any memsets, we put it right before the first instruction that
+  // isn't part of the memset block.  This ensure that the memset is dominated
+  // by any addressing instruction needed by the start of the block.
+  IRBuilder<> Builder(BI);
+
   // Now that we have full information about ranges, loop over the ranges and
   // emit memset's for anything big enough to be worthwhile.
-  bool MadeChange = false;
+  Instruction *AMemSet = 0;
   for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
        I != E; ++I) {
     const MemsetRange &Range = *I;
-
+
     if (Range.TheStores.size() == 1) continue;
 
     // If it is profitable to lower this range to memset, do so now.
     if (!Range.isProfitableToUseMemset(*TD))
       continue;
 
-    // Otherwise, we do want to transform this!  Create a new memset.  We put
-    // the memset right before the first instruction that isn't part of this
-    // memset block.  This ensure that the memset is dominated by any addressing
-    // instruction needed by the start of the block.
-    BasicBlock::iterator InsertPt = BI;
-
+    // Otherwise, we do want to transform this!  Create a new memset.
     // Get the starting pointer of the block.
     StartPtr = Range.StartPtr;
-
+
     // Determine alignment
     unsigned Alignment = Range.Alignment;
     if (Alignment == 0) {
-      const Type *EltType =
-         cast<PointerType>(StartPtr->getType())->getElementType();
+      Type *EltType =
+        cast<PointerType>(StartPtr->getType())->getElementType();
       Alignment = TD->getABITypeAlignment(EltType);
     }
-
-    // Cast the start ptr to be i8* as memset requires.
-    const PointerType* StartPTy = cast<PointerType>(StartPtr->getType());
-    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
-                                                  StartPTy->getAddressSpace());
-    if (StartPTy!= i8Ptr)
-      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
-                                 InsertPt);
-
-    Value *Ops[] = {
-      StartPtr, ByteVal,  // Start, value
-      // size
-      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
-      // align
-      ConstantInt::get(Type::getInt32Ty(Context), Alignment),
-      // volatile
-      ConstantInt::get(Type::getInt1Ty(Context), 0),
-    };
-    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
-
-    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
-
-    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
+
+    AMemSet =
+      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
+
     DEBUG(dbgs() << "Replace stores:\n";
           for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
-            dbgs() << *Range.TheStores[i];
-          dbgs() << "With: " << *C); C=C;
-
-    // Don't invalidate the iterator
-    BBI = BI;
-
+            dbgs() << *Range.TheStores[i] << '\n';
+          dbgs() << "With: " << *AMemSet << '\n');
+
+    if (!Range.TheStores.empty())
+      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
+
     // Zap all the stores.
-    for (SmallVector<StoreInst*, 16>::const_iterator
+    for (SmallVector<Instruction*, 16>::const_iterator
          SI = Range.TheStores.begin(),
-         SE = Range.TheStores.end(); SI != SE; ++SI)
+         SE = Range.TheStores.end(); SI != SE; ++SI) {
+      MD->removeInstruction(*SI);
       (*SI)->eraseFromParent();
+    }
     ++NumMemSetInfer;
-    MadeChange = true;
   }
 
-  return MadeChange;
+  return AMemSet;
+}
+
+
+bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
+  if (!SI->isSimple()) return false;
+
+  if (TD == 0) return false;
+
+  // Detect cases where we're performing call slot forwarding, but
+  // happen to be using a load-store pair to implement it, rather than
+  // a memcpy.
+  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
+    if (LI->isSimple() && LI->hasOneUse() &&
+        LI->getParent() == SI->getParent()) {
+      MemDepResult ldep = MD->getDependency(LI);
+      CallInst *C = 0;
+      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
+        C = dyn_cast<CallInst>(ldep.getInst());
+
+      if (C) {
+        // Check that nothing touches the dest of the "copy" between
+        // the call and the store.
+        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
+        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
+        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
+                                  E = C; I != E; --I) {
+          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
+            C = 0;
+            break;
+          }
+        }
+      }
+
+      if (C) {
+        bool changed = performCallSlotOptzn(LI,
+                        SI->getPointerOperand()->stripPointerCasts(),
+                        LI->getPointerOperand()->stripPointerCasts(),
+                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
+        if (changed) {
+          MD->removeInstruction(SI);
+          SI->eraseFromParent();
+          MD->removeInstruction(LI);
+          LI->eraseFromParent();
+          ++NumMemCpyInstr;
+          return true;
+        }
+      }
+    }
+  }
+
+  // There are two cases that are interesting for this code to handle: memcpy
+  // and memset.  Right now we only handle memset.
+
+  // Ensure that the value being stored is something that can be memset'able a
+  // byte at a time like "0" or "-1" or any width, as well as things like
+  // 0xA0A0A0A0 and 0.0.
+  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
+    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
+                                              ByteVal)) {
+      BBI = I;  // Don't invalidate iterator.
+      return true;
+    }
+
+  return false;
+}
+
+bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
+  // See if there is another memset or store neighboring this memset which
+  // allows us to widen out the memset to do a single larger store.
+  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
+    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
+                                              MSI->getValue())) {
+      BBI = I;  // Don't invalidate iterator.
+      return true;
+    }
+  return false;
 }
 
 /// performCallSlotOptzn - takes a memcpy and a call that it depends on,
 /// and checks for the possibility of a call slot optimization by having
 /// the call write its result directly into the destination of the memcpy.
-bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
+bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
+                                     Value *cpyDest, Value *cpySrc,
+                                     uint64_t cpyLen, CallInst *C) {
   // The general transformation to keep in mind is
   //
   //   call @func(..., src, ...)
@@ -506,15 +576,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
 
   // Deliberately get the source and destination with bitcasts stripped away,
   // because we'll need to do type comparisons based on the underlying type.
-  Value *cpyDest = cpy->getDest();
-  Value *cpySrc = cpy->getSource();
-  CallSite CS = CallSite::get(C);
-
-  // We need to be able to reason about the size of the memcpy, so we require
-  // that it be a constant.
-  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
-  if (!cpyLength)
-    return false;
+  CallSite CS(C);
 
   // Require that src be an alloca.  This simplifies the reasoning considerably.
   AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
   if (!srcAlloca)
@@ -522,8 +584,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
     return false;
 
   // Check that all of src is copied to dest.
-  TargetData *TD = getAnalysisIfAvailable<TargetData>();
-  if (!TD) return false;
+  if (TD == 0) return false;
   ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
   if (!srcArraySize)
     return false;
 
@@ -532,7 +593,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
   uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
     srcArraySize->getZExtValue();
 
-  if (cpyLength->getZExtValue() < srcSize)
+  if (cpyLen < srcSize)
     return false;
 
   // Check that accessing the first srcSize bytes of dest will not cause a
@@ -555,7 +616,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
     if (!A->hasStructRetAttr())
       return false;
 
-    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
+    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
     uint64_t destSize = TD->getTypeAllocSize(StructTy);
 
     if (destSize < srcSize)
@@ -601,8 +662,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
   // the use analysis, we also need to know that it does not sneakily
   // access dest.  We rely on AA to figure this out for us.
   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
-      AliasAnalysis::NoModRef)
+  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
     return false;
 
   // All the checks have passed, so do the transformation.
@@ -625,98 +685,146 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
 
   // Drop any cached information about the call, because we may have changed
   // its dependence information by changing its parameter.
-  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
-  MD.removeInstruction(C);
+  MD->removeInstruction(C);
 
-  // Remove the memcpy
-  MD.removeInstruction(cpy);
-  cpy->eraseFromParent();
+  // Remove the memcpy.
+  MD->removeInstruction(cpy);
 
   ++NumMemCpyInstr;
   return true;
 }
 
-/// processMemCpy - perform simplication of memcpy's.  If we have memcpy A which
-/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
-/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
-/// This allows later passes to remove the first memcpy altogether.
-bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
-  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
-
-  // The are two possible optimizations we can do for memcpy:
-  //   a) memcpy-memcpy xform which exposes redundance for DSE.
-  //   b) call-memcpy xform for return slot optimization.
-  MemDepResult dep = MD.getDependency(M);
-  if (!dep.isClobber())
-    return false;
-  if (!isa<MemCpyInst>(dep.getInst())) {
-    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
-      return performCallSlotOptzn(M, C);
-    return false;
-  }
-
-  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());
-
+/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
+/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
+/// copy from MDep's input if we can.  MSize is the size of M's copy.
+///
+bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
+                                              uint64_t MSize) {
   // We can only transforms memcpy's where the dest of one is the source of the
-  // other
-  if (M->getSource() != MDep->getDest())
+  // other.
+  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
     return false;
 
-  // Second, the length of the memcpy's must be the same, or the preceeding one
-  // must be larger than the following one.
-  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
-  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
-  if (!C1 || !C2)
+  // If dep instruction is reading from our current input, then it is a noop
+  // transfer and substituting the input won't change this instruction.  Just
+  // ignore the input and let someone else zap MDep.  This handles cases like:
+  //    memcpy(a <- a)
+  //    memcpy(b <- a)
+  if (M->getSource() == MDep->getSource())
     return false;
 
-  uint64_t DepSize = C1->getValue().getZExtValue();
-  uint64_t CpySize = C2->getValue().getZExtValue();
-
-  if (DepSize < CpySize)
+  // Second, the length of the memcpy's must be the same, or the preceding one
+  // must be larger than the following one.
+  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
+  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
+  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
     return false;
 
-  // Finally, we have to make sure that the dest of the second does not
-  // alias the source of the first
   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
-      AliasAnalysis::NoAlias)
-    return false;
-  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
-           AliasAnalysis::NoAlias)
-    return false;
-  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
-           != AliasAnalysis::NoAlias)
+
+  // Verify that the copied-from memory doesn't change in between the two
+  // transfers.  For example, in:
+  //    memcpy(a <- b)
+  //    *b = 42;
+  //    memcpy(c <- a)
+  // It would be invalid to transform the second memcpy into memcpy(c <- b).
+  //
+  // TODO: If the code between M and MDep is transparent to the destination "c",
+  // then we could still perform the xform by moving M up to the first memcpy.
+  //
+  // NOTE: This is conservative, it will stop on any read from the source loc,
+  // not just the defining memcpy.
+  MemDepResult SourceDep =
+    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
+                                 false, M, M->getParent());
+  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
     return false;
 
-  // If all checks passed, then we can transform these memcpy's
-  const Type *ArgTys[3] = { M->getRawDest()->getType(),
-                            MDep->getRawSource()->getType(),
-                            M->getLength()->getType() };
-  Function *MemCpyFun = Intrinsic::getDeclaration(
-                                 M->getParent()->getParent()->getParent(),
-                                 M->getIntrinsicID(), ArgTys, 3);
-
-  Value *Args[5] = {
-    M->getRawDest(), MDep->getRawSource(), M->getLength(),
-    M->getAlignmentCst(), M->getVolatileCst()
-  };
-
-  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
-
-
-  // If C and M don't interfere, then this is a valid transformation.  If they
-  // did, this would mean that the two sources overlap, which would be bad.
-  if (MD.getDependency(C) == dep) {
-    MD.removeInstruction(M);
+  // If the dest of the second might alias the source of the first, then the
+  // source and dest might overlap.  We still want to eliminate the intermediate
+  // value, but we have to generate a memmove instead of memcpy.
+  bool UseMemMove = false;
+  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
+    UseMemMove = true;
+
+  // If all checks passed, then we can transform M.
+
+  // Make sure to use the lesser of the alignment of the source and the dest
+  // since we're changing where we're reading from, but don't want to increase
+  // the alignment past what can be read from or written to.
+  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
+  // example we could be moving from movaps -> movq on x86.
+  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
+
+  IRBuilder<> Builder(M);
+  if (UseMemMove)
+    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
+                          Align, M->isVolatile());
+  else
+    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
+                         Align, M->isVolatile());
+
+  // Remove the instruction we're replacing.
+  MD->removeInstruction(M);
+  M->eraseFromParent();
+  ++NumMemCpyInstr;
+  return true;
+}
+
+
+/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
+/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
+/// B to be a memcpy from X to Z (or potentially a memmove, depending on
+/// circumstances). This allows later passes to remove the first memcpy
+/// altogether.
+bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
+  // We can only optimize statically-sized memcpy's that are non-volatile.
+  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
+  if (CopySize == 0 || M->isVolatile()) return false;
+
+  // If the source and destination of the memcpy are the same, then zap it.
+  if (M->getSource() == M->getDest()) {
+    MD->removeInstruction(M);
     M->eraseFromParent();
-    ++NumMemCpyInstr;
-    return true;
+    return false;
   }
-
-  // Otherwise, there was no point in doing this, so we remove the call we
-  // inserted and act like nothing happened.
-  MD.removeInstruction(C);
-  C->eraseFromParent();
+
+  // If copying from a constant, try to turn the memcpy into a memset.
+  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
+    if (GV->isConstant() && GV->hasDefinitiveInitializer())
+      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
+        IRBuilder<> Builder(M);
+        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
+                             M->getAlignment(), false);
+        MD->removeInstruction(M);
+        M->eraseFromParent();
+        ++NumCpyToSet;
+        return true;
+      }
+
+  // The are two possible optimizations we can do for memcpy:
+  //   a) memcpy-memcpy xform which exposes redundance for DSE.
+  //   b) call-memcpy xform for return slot optimization.
+  MemDepResult DepInfo = MD->getDependency(M);
+  if (DepInfo.isClobber()) {
+    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
+      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
+                               CopySize->getZExtValue(), C)) {
+        MD->removeInstruction(M);
+        M->eraseFromParent();
+        return true;
      }
    }
  }
+
+  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
+  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
+                                                         M, M->getParent());
+  if (SrcDepInfo.isClobber()) {
+    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
+      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
+  }
+
   return false;
 }
 
@@ -725,55 +833,130 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
 bool MemCpyOpt::processMemMove(MemMoveInst *M) {
   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
 
-  // If the memmove is a constant size, use it for the alias query, this allows
-  // us to optimize things like: memmove(P, P+64, 64);
-  uint64_t MemMoveSize = ~0ULL;
-  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
-    MemMoveSize = Len->getZExtValue();
+  if (!TLI->has(LibFunc::memmove))
+    return false;
 
   // See if the pointers alias.
-  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
-      AliasAnalysis::NoAlias)
+  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
     return false;
 
   DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");
 
   // If not, then we know we can transform this.
   Module *Mod = M->getParent()->getParent()->getParent();
-  const Type *ArgTys[3] = { M->getRawDest()->getType(),
-                            M->getRawSource()->getType(),
-                            M->getLength()->getType() };
-  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys, 3));
+  Type *ArgTys[3] = { M->getRawDest()->getType(),
+                      M->getRawSource()->getType(),
+                      M->getLength()->getType() };
+  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
+                                                 ArgTys));
 
   // MemDep may have over conservative information about this instruction, just
   // conservatively flush it from the cache.
-  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);
+  MD->removeInstruction(M);
 
   ++NumMoveToCpy;
   return true;
 }
 
+/// processByValArgument - This is called on every byval argument in call sites.
+bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
+  if (TD == 0) return false;
+
+  // Find out what feeds this byval argument.
+  Value *ByValArg = CS.getArgument(ArgNo);
+  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
+  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
+  MemDepResult DepInfo =
+    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
+                                 true, CS.getInstruction(),
+                                 CS.getInstruction()->getParent());
+  if (!DepInfo.isClobber())
+    return false;
+
+  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
+  // a memcpy, see if we can byval from the source of the memcpy instead of the
+  // result.
+  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
+  if (MDep == 0 || MDep->isVolatile() ||
+      ByValArg->stripPointerCasts() != MDep->getDest())
+    return false;
+
+  // The length of the memcpy must be larger or equal to the size of the byval.
+  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
+  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
+    return false;
+
+  // Get the alignment of the byval.  If the call doesn't specify the alignment,
+  // then it is some target specific value that we can't know.
+  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
+  if (ByValAlign == 0) return false;
+
+  // If it is greater than the memcpy, then we check to see if we can force the
+  // source of the memcpy to the alignment we need.  If we fail, we bail out.
+  if (MDep->getAlignment() < ByValAlign &&
+      getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
+    return false;
+
+  // Verify that the copied-from memory doesn't change in between the memcpy and
+  // the byval call.
+  //    memcpy(a <- b)
+  //    *b = 42;
+  //    foo(*a)
+  // It would be invalid to transform the second memcpy into foo(*b).
+  //
+  // NOTE: This is conservative, it will stop on any read from the source loc,
+  // not just the defining memcpy.
+  MemDepResult SourceDep =
+    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
+                                 false, CS.getInstruction(), MDep->getParent());
+  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
+    return false;
+
+  Value *TmpCast = MDep->getSource();
+  if (MDep->getSource()->getType() != ByValArg->getType())
+    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
+                              "tmpcast", CS.getInstruction());
+
+  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
+               << "  " << *MDep << "\n"
+               << "  " << *CS.getInstruction() << "\n");
+
+  // Otherwise we're good!  Update the byval argument.
+  CS.setArgument(ArgNo, TmpCast);
+  ++NumMemCpyInstr;
+  return true;
+}
 
-// MemCpyOpt::iterateOnFunction - Executes one iteration of GVN.
+/// iterateOnFunction - Executes one iteration of MemCpyOpt.
 bool MemCpyOpt::iterateOnFunction(Function &F) {
   bool MadeChange = false;
 
   // Walk all instruction in the function.
   for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
-    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
-         BI != BE;) {
+    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
       // Avoid invalidating the iterator.
       Instruction *I = BI++;
 
+      bool RepeatInstruction = false;
+
       if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
+      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
+        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
-        MadeChange |= processMemCpy(M);
-      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
-        if (processMemMove(M)) {
-          --BI;          // Reprocess the new memcpy.
-          MadeChange = true;
-        }
+        RepeatInstruction = processMemCpy(M);
+      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
+        RepeatInstruction = processMemMove(M);
+      else if (CallSite CS = (Value*)I) {
+        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
+          if (CS.isByValArgument(i))
+            MadeChange |= processByValArgument(CS, i);
+      }
+
+      // Reprocess the instruction if desired.
+      if (RepeatInstruction) {
+        if (BI != BB->begin()) --BI;
+        MadeChange = true;
      }
    }
  }
 
@@ -786,14 +969,22 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
 //
 bool MemCpyOpt::runOnFunction(Function &F) {
   bool MadeChange = false;
+  MD = &getAnalysis<MemoryDependenceAnalysis>();
+  TD = getAnalysisIfAvailable<TargetData>();
+  TLI = &getAnalysis<TargetLibraryInfo>();
+
+  // If we don't have at least memset and memcpy, there is little point of doing
+  // anything here.  These are required by a freestanding implementation, so if
+  // even they are disabled, there is no point in trying hard.
+  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
+    return false;
+
   while (1) {
     if (!iterateOnFunction(F))
       break;
     MadeChange = true;
   }
 
+  MD = 0;
   return MadeChange;
 }
-
-
-