X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=lib%2FTransforms%2FScalar%2FGVN.cpp;h=d9308c4e3710d63b51dc591aff5721c6f1b94736;hb=cf0db29df20d9c665da7e82bb261bdd7cf7f1b2b;hp=09aff1b3b3cb4a3346b1e970d5471a5e9af30c35;hpb=5cdbb1de7d9ca578330b35b7fb7b10296216316b;p=oota-llvm.git diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index 09aff1b3b3c..d9308c4e371 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -15,38 +15,47 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "gvn" #include "llvm/Transforms/Scalar.h" -#include "llvm/GlobalVariable.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DepthFirstIterator.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/CFG.h" #include "llvm/Analysis/ConstantFolding.h" -#include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/PHITransAddr.h" +#include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Assembly/Writer.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetLibraryInfo.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/SSAUpdater.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/DepthFirstIterator.h" -#include "llvm/ADT/Hashing.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/Statistic.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/PatternMatch.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" -#include "llvm/Support/IRBuilder.h" -#include "llvm/Support/PatternMatch.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/SSAUpdater.h" +#include using namespace llvm; using namespace PatternMatch; +#define DEBUG_TYPE "gvn" + STATISTIC(NumGVNInstr, "Number of instructions deleted"); STATISTIC(NumGVNLoad, "Number of loads deleted"); STATISTIC(NumGVNPRE, "Number of instructions PRE'd"); @@ -129,7 +138,7 @@ namespace { uint32_t getNextUnusedValueNumber() { return nextValueNumber; } void verifyRemoved(const Value *) const; }; -} +} // namespace namespace llvm { template <> struct DenseMapInfo { @@ -150,7 +159,7 @@ template <> struct DenseMapInfo { } }; -} +} // namespace llvm //===----------------------------------------------------------------------===// // ValueTable Internal Functions @@ -172,7 +181,7 @@ Expression ValueTable::create_expression(Instruction *I) { if (e.varargs[0] > e.varargs[1]) std::swap(e.varargs[0], e.varargs[1]); } - + if (CmpInst *C = dyn_cast(I)) { // Sort the operand value numbers so xx get the same value number. 
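// A minimal standalone sketch of the canonicalization that
// ValueTable::create_expression / create_cmp_expression above perform: for
// commutative operators the two operand value numbers are sorted, and for
// comparisons the predicate is mirrored whenever the operands are swapped,
// so that e.g. "a < b" and "b > a" map to the same expression key. The Pred
// enum, CmpKey struct and makeCmpKey helper are illustrative stand-ins, not
// LLVM's own types.
#include <cstdint>
#include <tuple>
#include <utility>

enum class Pred { LT, GT, LE, GE, EQ, NE };

// Mirror a predicate when the operand order is reversed (a < b  <=>  b > a).
static Pred swappedPredicate(Pred P) {
  switch (P) {
  case Pred::LT: return Pred::GT;
  case Pred::GT: return Pred::LT;
  case Pred::LE: return Pred::GE;
  case Pred::GE: return Pred::LE;
  default:       return P;        // EQ and NE are symmetric already.
  }
}

struct CmpKey {
  Pred P;
  uint32_t LHS, RHS;              // Operand value numbers.
  bool operator==(const CmpKey &O) const {
    return std::tie(P, LHS, RHS) == std::tie(O.P, O.LHS, O.RHS);
  }
};

// Build a canonical key: always keep the smaller value number first.
static CmpKey makeCmpKey(Pred P, uint32_t LHS, uint32_t RHS) {
  if (LHS > RHS) {
    std::swap(LHS, RHS);
    P = swappedPredicate(P);
  }
  return {P, LHS, RHS};
}

int main() {
  // "v1 < v2" and "v2 > v1" canonicalize to the same key, so a table keyed
  // on CmpKey hands both the same value number.
  return makeCmpKey(Pred::LT, 1, 2) == makeCmpKey(Pred::GT, 2, 1) ? 0 : 1;
}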
CmpInst::Predicate Predicate = C->getPredicate(); @@ -186,7 +195,7 @@ Expression ValueTable::create_expression(Instruction *I) { II != IE; ++II) e.varargs.push_back(*II); } - + return e; } @@ -210,13 +219,13 @@ Expression ValueTable::create_cmp_expression(unsigned Opcode, } Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) { - assert(EI != 0 && "Not an ExtractValueInst?"); + assert(EI && "Not an ExtractValueInst?"); Expression e; e.type = EI->getType(); e.opcode = 0; IntrinsicInst *I = dyn_cast(EI->getAggregateOperand()); - if (I != 0 && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) { + if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) { // EI might be an extract from one of our recognised intrinsics. If it // is we'll synthesize a semantically equivalent expression instead on // an extract value expression. @@ -270,16 +279,16 @@ void ValueTable::add(Value *V, uint32_t num) { valueNumbering.insert(std::make_pair(V, num)); } -uint32_t ValueTable::lookup_or_add_call(CallInst* C) { +uint32_t ValueTable::lookup_or_add_call(CallInst *C) { if (AA->doesNotAccessMemory(C)) { Expression exp = create_expression(C); - uint32_t& e = expressionNumbering[exp]; + uint32_t &e = expressionNumbering[exp]; if (!e) e = nextValueNumber++; valueNumbering[C] = e; return e; } else if (AA->onlyReadsMemory(C)) { Expression exp = create_expression(C); - uint32_t& e = expressionNumbering[exp]; + uint32_t &e = expressionNumbering[exp]; if (!e) { e = nextValueNumber++; valueNumbering[C] = e; @@ -324,7 +333,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) { const MemoryDependenceAnalysis::NonLocalDepInfo &deps = MD->getNonLocalCallDependency(CallSite(C)); // FIXME: Move the checking logic to MemDep! - CallInst* cdep = 0; + CallInst* cdep = nullptr; // Check to see if we have a single dominating call instruction that is // identical to C. @@ -335,8 +344,8 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) { // We don't handle non-definitions. If we already have a call, reject // instruction dependencies. - if (!I->getResult().isDef() || cdep != 0) { - cdep = 0; + if (!I->getResult().isDef() || cdep != nullptr) { + cdep = nullptr; break; } @@ -347,7 +356,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) { continue; } - cdep = 0; + cdep = nullptr; break; } @@ -390,7 +399,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) { valueNumbering[V] = nextValueNumber; return nextValueNumber++; } - + Instruction* I = cast(V); Expression exp; switch (I->getOpcode()) { @@ -412,7 +421,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) { case Instruction::LShr: case Instruction::AShr: case Instruction::And: - case Instruction::Or : + case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: @@ -450,7 +459,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) { return e; } -/// lookup - Returns the value number of the specified value. Fails if +/// Returns the value number of the specified value. Fails if /// the value has not yet been numbered. uint32_t ValueTable::lookup(Value *V) const { DenseMap::const_iterator VI = valueNumbering.find(V); @@ -458,7 +467,7 @@ uint32_t ValueTable::lookup(Value *V) const { return VI->second; } -/// lookup_or_add_cmp - Returns the value number of the given comparison, +/// Returns the value number of the given comparison, /// assigning it a new number if it did not have one before. 
Useful when /// we deduced the result of a comparison, but don't immediately have an /// instruction realizing that comparison to hand. @@ -471,14 +480,14 @@ uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode, return e; } -/// clear - Remove all entries from the ValueTable. +/// Remove all entries from the ValueTable. void ValueTable::clear() { valueNumbering.clear(); expressionNumbering.clear(); nextValueNumber = 1; } -/// erase - Remove a value from the value numbering. +/// Remove a value from the value numbering. void ValueTable::erase(Value *V) { valueNumbering.erase(V); } @@ -497,82 +506,167 @@ void ValueTable::verifyRemoved(const Value *V) const { //===----------------------------------------------------------------------===// namespace { + class GVN; + struct AvailableValueInBlock { + /// BB - The basic block in question. + BasicBlock *BB; + enum ValType { + SimpleVal, // A simple offsetted value that is accessed. + LoadVal, // A value produced by a load. + MemIntrin, // A memory intrinsic which is loaded from. + UndefVal // A UndefValue representing a value from dead block (which + // is not yet physically removed from the CFG). + }; + + /// V - The value that is live out of the block. + PointerIntPair Val; + + /// Offset - The byte offset in Val that is interesting for the load query. + unsigned Offset; + + static AvailableValueInBlock get(BasicBlock *BB, Value *V, + unsigned Offset = 0) { + AvailableValueInBlock Res; + Res.BB = BB; + Res.Val.setPointer(V); + Res.Val.setInt(SimpleVal); + Res.Offset = Offset; + return Res; + } + + static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI, + unsigned Offset = 0) { + AvailableValueInBlock Res; + Res.BB = BB; + Res.Val.setPointer(MI); + Res.Val.setInt(MemIntrin); + Res.Offset = Offset; + return Res; + } + + static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI, + unsigned Offset = 0) { + AvailableValueInBlock Res; + Res.BB = BB; + Res.Val.setPointer(LI); + Res.Val.setInt(LoadVal); + Res.Offset = Offset; + return Res; + } + + static AvailableValueInBlock getUndef(BasicBlock *BB) { + AvailableValueInBlock Res; + Res.BB = BB; + Res.Val.setPointer(nullptr); + Res.Val.setInt(UndefVal); + Res.Offset = 0; + return Res; + } + + bool isSimpleValue() const { return Val.getInt() == SimpleVal; } + bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; } + bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; } + bool isUndefValue() const { return Val.getInt() == UndefVal; } + + Value *getSimpleValue() const { + assert(isSimpleValue() && "Wrong accessor"); + return Val.getPointer(); + } + + LoadInst *getCoercedLoadValue() const { + assert(isCoercedLoadValue() && "Wrong accessor"); + return cast(Val.getPointer()); + } + + MemIntrinsic *getMemIntrinValue() const { + assert(isMemIntrinValue() && "Wrong accessor"); + return cast(Val.getPointer()); + } + + /// Emit code into this block to adjust the value defined here to the + /// specified type. This handles various coercion cases. + Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const; + }; class GVN : public FunctionPass { bool NoLoads; MemoryDependenceAnalysis *MD; DominatorTree *DT; - const TargetData *TD; const TargetLibraryInfo *TLI; + AssumptionCache *AC; + SetVector DeadBlocks; ValueTable VN; - - /// LeaderTable - A mapping from value numbers to lists of Value*'s that + + /// A mapping from value numbers to lists of Value*'s that /// have that value number. Use findLeader to query it. 
struct LeaderTableEntry { Value *Val; - BasicBlock *BB; + const BasicBlock *BB; LeaderTableEntry *Next; }; DenseMap LeaderTable; BumpPtrAllocator TableAllocator; - + SmallVector InstrsToErase; + + typedef SmallVector LoadDepVect; + typedef SmallVector AvailValInBlkVect; + typedef SmallVector UnavailBlkVect; + public: static char ID; // Pass identification, replacement for typeid explicit GVN(bool noloads = false) - : FunctionPass(ID), NoLoads(noloads), MD(0) { + : FunctionPass(ID), NoLoads(noloads), MD(nullptr) { initializeGVNPass(*PassRegistry::getPassRegistry()); } - bool runOnFunction(Function &F); - - /// markInstructionForDeletion - This removes the specified instruction from + bool runOnFunction(Function &F) override; + + /// This removes the specified instruction from /// our various maps and marks it for deletion. void markInstructionForDeletion(Instruction *I) { VN.erase(I); InstrsToErase.push_back(I); } - - const TargetData *getTargetData() const { return TD; } + DominatorTree &getDominatorTree() const { return *DT; } AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); } MemoryDependenceAnalysis &getMemDep() const { return *MD; } private: - /// addToLeaderTable - Push a new Value to the LeaderTable onto the list for - /// its value number. - void addToLeaderTable(uint32_t N, Value *V, BasicBlock *BB) { + /// Push a new Value to the LeaderTable onto the list for its value number. + void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) { LeaderTableEntry &Curr = LeaderTable[N]; if (!Curr.Val) { Curr.Val = V; Curr.BB = BB; return; } - + LeaderTableEntry *Node = TableAllocator.Allocate(); Node->Val = V; Node->BB = BB; Node->Next = Curr.Next; Curr.Next = Node; } - - /// removeFromLeaderTable - Scan the list of values corresponding to a given + + /// Scan the list of values corresponding to a given /// value number, and remove the given instruction if encountered. 
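// A minimal standalone sketch of the LeaderTable bookkeeping above: each
// value number maps to a head entry, and further (value, block) pairs are
// chained off it so findLeader can walk the list. std::map, std::string and
// plain new/delete stand in for LLVM's DenseMap, Value*/BasicBlock* and
// BumpPtrAllocator.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <utility>

struct Entry {
  std::string Val;        // Stand-in for Value*.
  std::string BB;         // Stand-in for const BasicBlock*.
  Entry *Next = nullptr;
};

static std::map<uint32_t, Entry> LeaderTable;

static void addToLeaderTable(uint32_t N, std::string V, std::string BB) {
  Entry &Head = LeaderTable[N];
  if (Head.Val.empty()) {           // First entry for this value number.
    Head.Val = std::move(V);
    Head.BB = std::move(BB);
    return;
  }
  // Otherwise splice a new node in right after the head.
  Entry *Node = new Entry;
  Node->Val = std::move(V);
  Node->BB = std::move(BB);
  Node->Next = Head.Next;
  Head.Next = Node;
}

static void removeFromLeaderTable(uint32_t N, const std::string &V,
                                  const std::string &BB) {
  Entry *Prev = nullptr;
  Entry *Curr = &LeaderTable[N];
  while (Curr->Val != V || Curr->BB != BB) {
    Prev = Curr;
    Curr = Curr->Next;
    assert(Curr && "value was never in the table");
  }
  if (Prev) {                        // Unlink an interior node.
    Prev->Next = Curr->Next;
    delete Curr;
  } else if (!Curr->Next) {          // Head was the only entry.
    Curr->Val.clear();
    Curr->BB.clear();
  } else {                           // Copy the second entry into the head.
    Entry *Next = Curr->Next;
    Curr->Val = Next->Val;
    Curr->BB = Next->BB;
    Curr->Next = Next->Next;
    delete Next;
  }
}

int main() {
  addToLeaderTable(7, "%a", "entry");
  addToLeaderTable(7, "%b", "loop");
  removeFromLeaderTable(7, "%a", "entry");
  return LeaderTable[7].Val == "%b" ? 0 : 1;
}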
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) { - LeaderTableEntry* Prev = 0; + LeaderTableEntry* Prev = nullptr; LeaderTableEntry* Curr = &LeaderTable[N]; while (Curr->Val != I || Curr->BB != BB) { Prev = Curr; Curr = Curr->Next; } - + if (Prev) { Prev->Next = Curr->Next; } else { if (!Curr->Next) { - Curr->Val = 0; - Curr->BB = 0; + Curr->Val = nullptr; + Curr->BB = nullptr; } else { LeaderTableEntry* Next = Curr->Next; Curr->Val = Next->Val; @@ -586,51 +680,65 @@ namespace { SmallVector, 4> toSplit; // This transformation requires dominator postdominator info - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); - AU.addRequired(); + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); if (!NoLoads) AU.addRequired(); AU.addRequired(); - AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); } - - // Helper fuctions - // FIXME: eliminate or document these better + + // Helper fuctions of redundant load elimination bool processLoad(LoadInst *L); - bool processInstruction(Instruction *I); bool processNonLocalLoad(LoadInst *L); + void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, + AvailValInBlkVect &ValuesPerBlock, + UnavailBlkVect &UnavailableBlocks); + bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, + UnavailBlkVect &UnavailableBlocks); + + // Other helper routines + bool processInstruction(Instruction *I); bool processBlock(BasicBlock *BB); void dump(DenseMap &d); bool iterateOnFunction(Function &F); bool performPRE(Function &F); - Value *findLeader(BasicBlock *BB, uint32_t num); + bool performScalarPRE(Instruction *I); + bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, + unsigned int ValNo); + Value *findLeader(const BasicBlock *BB, uint32_t num); void cleanupGlobalSets(); void verifyRemoved(const Instruction *I) const; bool splitCriticalEdges(); - unsigned replaceAllDominatedUsesWith(Value *From, Value *To, - BasicBlock *Root); - bool propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root); + BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ); + bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root); + bool processFoldableCondBr(BranchInst *BI); + void addDeadBlock(BasicBlock *BB); + void assignValNumForDeadCode(); }; char GVN::ID = 0; -} +} // namespace -// createGVNPass - The public interface to this file... +// The public interface to this file... FunctionPass *llvm::createGVNPass(bool NoLoads) { return new GVN(NoLoads); } INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false) +INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis) -INITIALIZE_PASS_DEPENDENCY(DominatorTree) -INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false) +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void GVN::dump(DenseMap& d) { errs() << "{\n"; for (DenseMap::iterator I = d.begin(), @@ -640,8 +748,9 @@ void GVN::dump(DenseMap& d) { } errs() << "}\n"; } +#endif -/// IsValueFullyAvailableInBlock - Return true if we can prove that the value +/// Return true if we can prove that the value /// we're analyzing is fully available in the specified block. 
As we go, keep /// track of which blocks we know are fully alive in FullyAvailableBlocks. This /// map is actually a tri-state map with the following values: @@ -687,7 +796,7 @@ static bool IsValueFullyAvailableInBlock(BasicBlock *BB, return true; -// SpeculationFailure - If we get here, we found out that this is not, after +// If we get here, we found out that this is not, after // all, a fully-available block. We have a problem if we speculated on this and // used the speculation to mark other blocks as available. SpeculationFailure: @@ -715,120 +824,116 @@ SpeculationFailure: // Mark as unavailable. EntryVal = 0; - for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I) - BBWorklist.push_back(*I); + BBWorklist.append(succ_begin(Entry), succ_end(Entry)); } while (!BBWorklist.empty()); return false; } -/// CanCoerceMustAliasedValueToLoad - Return true if -/// CoerceAvailableValueToLoadType will succeed. +/// Return true if CoerceAvailableValueToLoadType will succeed. static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, - const TargetData &TD) { + const DataLayout &DL) { // If the loaded or stored value is an first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy()) return false; - + // The store has to be at least as big as the load. - if (TD.getTypeSizeInBits(StoredVal->getType()) < - TD.getTypeSizeInBits(LoadTy)) + if (DL.getTypeSizeInBits(StoredVal->getType()) < + DL.getTypeSizeInBits(LoadTy)) return false; - + return true; } - -/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and +/// If we saw a store of a value to memory, and /// then a load from a must-aliased pointer of a different type, try to coerce -/// the stored value. LoadedTy is the type of the load we want to replace and -/// InsertPt is the place to insert new instructions. +/// the stored value. LoadedTy is the type of the load we want to replace. +/// IRB is IRBuilder used to insert new instructions. /// /// If we can't do it, return null. -static Value *CoerceAvailableValueToLoadType(Value *StoredVal, - Type *LoadedTy, - Instruction *InsertPt, - const TargetData &TD) { - if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD)) - return 0; - +static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy, + IRBuilder<> &IRB, + const DataLayout &DL) { + if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL)) + return nullptr; + // If this is already the right type, just return it. Type *StoredValTy = StoredVal->getType(); - - uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy); - uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy); - + + uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy); + uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy); + // If the store and reload are the same size, we can always reuse it. if (StoreSize == LoadSize) { // Pointer to Pointer -> use bitcast. - if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) - return new BitCastInst(StoredVal, LoadedTy, "", InsertPt); - + if (StoredValTy->getScalarType()->isPointerTy() && + LoadedTy->getScalarType()->isPointerTy()) + return IRB.CreateBitCast(StoredVal, LoadedTy); + // Convert source pointers to integers, which can be bitcast. 
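// A standalone sketch of the question IsValueFullyAvailableInBlock above
// answers: is a value available on *every* path into a block? The real pass
// memoizes a tri-state per block and has to undo optimistic guesses when a
// speculation fails; this simplified model just runs a conservative
// fixed-point over a tiny adjacency-list CFG, which is enough to show the
// "available on all incoming edges" idea. Block names and the CFG type are
// illustrative.
#include <map>
#include <set>
#include <string>
#include <vector>

using CFG = std::map<std::string, std::vector<std::string>>; // block -> preds

// Blocks in Defs produce the value themselves; any other block has it fully
// available only if all of its predecessors do.
static std::set<std::string> fullyAvailable(const CFG &Preds,
                                            const std::set<std::string> &Defs) {
  std::set<std::string> Avail = Defs;
  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (const auto &BBAndPreds : Preds) {
      const std::string &BB = BBAndPreds.first;
      if (Avail.count(BB) || BBAndPreds.second.empty())
        continue;
      bool AllPreds = true;
      for (const std::string &P : BBAndPreds.second)
        if (!Avail.count(P)) { AllPreds = false; break; }
      if (AllPreds) {
        Avail.insert(BB);
        Changed = true;
      }
    }
  }
  return Avail;
}

int main() {
  // entry branches to left and right; both define the value; they rejoin in
  // merge. The value is fully available in merge but not in entry.
  CFG Preds = {{"entry", {}},
               {"left", {"entry"}},
               {"right", {"entry"}},
               {"merge", {"left", "right"}}};
  std::set<std::string> Avail = fullyAvailable(Preds, {"left", "right"});
  return (Avail.count("merge") && !Avail.count("entry")) ? 0 : 1;
}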
- if (StoredValTy->isPointerTy()) { - StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); - StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); + if (StoredValTy->getScalarType()->isPointerTy()) { + StoredValTy = DL.getIntPtrType(StoredValTy); + StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy); } - + Type *TypeToCastTo = LoadedTy; - if (TypeToCastTo->isPointerTy()) - TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext()); - + if (TypeToCastTo->getScalarType()->isPointerTy()) + TypeToCastTo = DL.getIntPtrType(TypeToCastTo); + if (StoredValTy != TypeToCastTo) - StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt); - + StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo); + // Cast to pointer if the load needs a pointer type. - if (LoadedTy->isPointerTy()) - StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt); - + if (LoadedTy->getScalarType()->isPointerTy()) + StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy); + return StoredVal; } - + // If the loaded value is smaller than the available value, then we can // extract out a piece from it. If the available value is too small, then we // can't do anything. assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail"); - + // Convert source pointers to integers, which can be manipulated. - if (StoredValTy->isPointerTy()) { - StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); - StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); + if (StoredValTy->getScalarType()->isPointerTy()) { + StoredValTy = DL.getIntPtrType(StoredValTy); + StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy); } - + // Convert vectors and fp to integer, which can be manipulated. if (!StoredValTy->isIntegerTy()) { StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize); - StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt); + StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy); } - + // If this is a big-endian system, we need to shift the value down to the low // bits so that a truncate will work. - if (TD.isBigEndian()) { - Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize); - StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt); + if (DL.isBigEndian()) { + StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp"); } - + // Truncate the integer to the right size now. Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize); - StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt); - + StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc"); + if (LoadedTy == NewIntTy) return StoredVal; - + // If the result is a pointer, inttoptr. - if (LoadedTy->isPointerTy()) - return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt); - + if (LoadedTy->getScalarType()->isPointerTy()) + return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr"); + // Otherwise, bitcast. - return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt); + return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast"); } -/// AnalyzeLoadFromClobberingWrite - This function is called when we have a +/// This function is called when we have a /// memdep query of a load that ends up being a clobbering memory write (store, /// memset, memcpy, memmove). This means that the write *may* provide bits used /// by the load but we can't be sure because the pointers don't mustalias. 
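// A standalone model of the shift-and-truncate case handled by
// CoerceAvailableValueToLoadType above: when the stored value is wider than
// the load, the interesting bits are moved down to the low end (which bits
// those are depends on endianness) and then truncated. Plain uint64_t and
// uint8_t stand in for the IR-level PtrToInt/LShr/Trunc sequence.
#include <cstdint>

static uint8_t coerceToByte(uint64_t StoredVal, unsigned StoreSizeBytes,
                            unsigned LoadSizeBytes, bool BigEndian) {
  // On a big-endian target the leading bytes of the object live in the high
  // bits of the integer, so shift the value down before truncating.
  unsigned ShiftBytes = BigEndian ? StoreSizeBytes - LoadSizeBytes : 0;
  return static_cast<uint8_t>(StoredVal >> (ShiftBytes * 8));
}

int main() {
  // A 4-byte store of 0x11223344 feeding a 1-byte load of its first byte:
  // little-endian forwards 0x44, big-endian forwards 0x11.
  uint64_t Stored = 0x11223344;
  bool OK = coerceToByte(Stored, 4, 1, /*BigEndian=*/false) == 0x44 &&
            coerceToByte(Stored, 4, 1, /*BigEndian=*/true) == 0x11;
  return OK ? 0 : 1;
}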
@@ -839,18 +944,19 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal, static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, Value *WritePtr, uint64_t WriteSizeInBits, - const TargetData &TD) { + const DataLayout &DL) { // If the loaded or stored value is a first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. if (LoadTy->isStructTy() || LoadTy->isArrayTy()) return -1; - + int64_t StoreOffset = 0, LoadOffset = 0; - Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD); - Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD); + Value *StoreBase = + GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL); + Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL); if (StoreBase != LoadBase) return -1; - + // If the load and store are to the exact same address, they should have been // a must alias. AA must have gotten confused. // FIXME: Study to see if/when this happens. One case is forwarding a memset @@ -865,18 +971,18 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, abort(); } #endif - + // If the load and store don't overlap at all, the store doesn't provide // anything to the load. In this case, they really don't alias at all, AA // must have gotten confused. - uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy); - + uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy); + if ((WriteSizeInBits & 7) | (LoadSize & 7)) return -1; uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes. LoadSize >>= 3; - - + + bool isAAFailure = false; if (StoreOffset < LoadOffset) isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset; @@ -894,7 +1000,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, #endif return -1; } - + // If the Load isn't completely contained within the stored bits, we don't // have all the bits to feed it. We could do something crazy in the future // (issue a smaller load then merge the bits in) but this seems unlikely to be @@ -902,142 +1008,145 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, if (StoreOffset > LoadOffset || StoreOffset+StoreSize < LoadOffset+LoadSize) return -1; - + // Okay, we can do this transformation. Return the number of bytes into the // store that the load is. return LoadOffset-StoreOffset; -} +} -/// AnalyzeLoadFromClobberingStore - This function is called when we have a +/// This function is called when we have a /// memdep query of a load that ends up being a clobbering store. static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, - StoreInst *DepSI, - const TargetData &TD) { + StoreInst *DepSI) { // Cannot handle reading from store of first-class aggregate yet. if (DepSI->getValueOperand()->getType()->isStructTy() || DepSI->getValueOperand()->getType()->isArrayTy()) return -1; + const DataLayout &DL = DepSI->getModule()->getDataLayout(); Value *StorePtr = DepSI->getPointerOperand(); - uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType()); + uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType()); return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, - StorePtr, StoreSize, TD); + StorePtr, StoreSize, DL); } -/// AnalyzeLoadFromClobberingLoad - This function is called when we have a +/// This function is called when we have a /// memdep query of a load that ends up being clobbered by another load. See if /// the other load can feed into the second load. 
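// A standalone sketch of the interval check in AnalyzeLoadFromClobberingWrite
// above: once both addresses are expressed as the same base plus a constant
// byte offset, the load can be fed from the store only if it is completely
// contained in the stored bytes, and the result is the load's byte offset
// into the stored value (or -1 if forwarding is impossible). The helper and
// its parameters are illustrative, not LLVM's signature.
#include <cstdint>

static int clobberOffset(int64_t StoreOffset, uint64_t StoreSizeBytes,
                         int64_t LoadOffset, uint64_t LoadSizeBytes) {
  // Disjoint ranges: the two accesses do not actually overlap at all.
  if (StoreOffset + (int64_t)StoreSizeBytes <= LoadOffset ||
      LoadOffset + (int64_t)LoadSizeBytes <= StoreOffset)
    return -1;
  // Partial overlap: the store does not provide every bit the load needs.
  if (StoreOffset > LoadOffset ||
      StoreOffset + (int64_t)StoreSizeBytes < LoadOffset + (int64_t)LoadSizeBytes)
    return -1;
  return (int)(LoadOffset - StoreOffset);
}

int main() {
  // store i32 at base+0 (4 bytes), load i8 at base+2 -> forwardable, offset 2.
  // store i32 at base+0, load i32 at base+2 -> runs past the store -> -1.
  return (clobberOffset(0, 4, 2, 1) == 2 &&
          clobberOffset(0, 4, 2, 4) == -1) ? 0 : 1;
}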
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, - LoadInst *DepLI, const TargetData &TD){ + LoadInst *DepLI, const DataLayout &DL){ // Cannot handle reading from store of first-class aggregate yet. if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy()) return -1; - + Value *DepPtr = DepLI->getPointerOperand(); - uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType()); - int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD); + uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType()); + int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL); if (R != -1) return R; - + // If we have a load/load clobber an DepLI can be widened to cover this load, // then we should widen it! int64_t LoadOffs = 0; const Value *LoadBase = - GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD); - unsigned LoadSize = TD.getTypeStoreSize(LoadTy); - - unsigned Size = MemoryDependenceAnalysis:: - getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD); + GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL); + unsigned LoadSize = DL.getTypeStoreSize(LoadTy); + + unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize( + LoadBase, LoadOffs, LoadSize, DepLI); if (Size == 0) return -1; - - return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD); + + return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL); } static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *MI, - const TargetData &TD) { + const DataLayout &DL) { // If the mem operation is a non-constant size, we can't handle it. ConstantInt *SizeCst = dyn_cast(MI->getLength()); - if (SizeCst == 0) return -1; + if (!SizeCst) return -1; uint64_t MemSizeInBits = SizeCst->getZExtValue()*8; // If this is memset, we just need to see if the offset is valid in the size // of the memset.. if (MI->getIntrinsicID() == Intrinsic::memset) return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(), - MemSizeInBits, TD); - + MemSizeInBits, DL); + // If we have a memcpy/memmove, the only case we can handle is if this is a // copy from constant memory. In that case, we can read directly from the // constant memory. MemTransferInst *MTI = cast(MI); - + Constant *Src = dyn_cast(MTI->getSource()); - if (Src == 0) return -1; - - GlobalVariable *GV = dyn_cast(GetUnderlyingObject(Src, &TD)); - if (GV == 0 || !GV->isConstant()) return -1; - + if (!Src) return -1; + + GlobalVariable *GV = dyn_cast(GetUnderlyingObject(Src, DL)); + if (!GV || !GV->isConstant()) return -1; + // See if the access is within the bounds of the transfer. int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, - MI->getDest(), MemSizeInBits, TD); + MI->getDest(), MemSizeInBits, DL); if (Offset == -1) return Offset; - + + unsigned AS = Src->getType()->getPointerAddressSpace(); // Otherwise, see if we can constant fold a load from the constant with the // offset applied as appropriate. 
Src = ConstantExpr::getBitCast(Src, - llvm::Type::getInt8PtrTy(Src->getContext())); - Constant *OffsetCst = + Type::getInt8PtrTy(Src->getContext(), AS)); + Constant *OffsetCst = ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); - Src = ConstantExpr::getGetElementPtr(Src, OffsetCst); - Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy)); - if (ConstantFoldLoadFromConstPtr(Src, &TD)) + Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src, + OffsetCst); + Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS)); + if (ConstantFoldLoadFromConstPtr(Src, DL)) return Offset; return -1; } - -/// GetStoreValueForLoad - This function is called when we have a + +/// This function is called when we have a /// memdep query of a load that ends up being a clobbering store. This means /// that the store provides bits used by the load but we the pointers don't /// mustalias. Check this case to see if there is anything more we can do /// before we give up. static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, - Instruction *InsertPt, const TargetData &TD){ + Instruction *InsertPt, const DataLayout &DL){ LLVMContext &Ctx = SrcVal->getType()->getContext(); - - uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8; - uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8; - - IRBuilder<> Builder(InsertPt->getParent(), InsertPt); - + + uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8; + uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8; + + IRBuilder<> Builder(InsertPt); + // Compute which bits of the stored value are being used by the load. Convert // to an integer type to start with. - if (SrcVal->getType()->isPointerTy()) - SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx)); + if (SrcVal->getType()->getScalarType()->isPointerTy()) + SrcVal = Builder.CreatePtrToInt(SrcVal, + DL.getIntPtrType(SrcVal->getType())); if (!SrcVal->getType()->isIntegerTy()) SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8)); - + // Shift the bits to the least significant depending on endianness. unsigned ShiftAmt; - if (TD.isLittleEndian()) + if (DL.isLittleEndian()) ShiftAmt = Offset*8; else ShiftAmt = (StoreSize-LoadSize-Offset)*8; - + if (ShiftAmt) SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt); - + if (LoadSize != StoreSize) SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8)); - - return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD); + + return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL); } -/// GetLoadValueForLoad - This function is called when we have a +/// This function is called when we have a /// memdep query of a load that ends up being a clobbering load. This means /// that the load *may* provide bits used by the load but we can't be sure /// because the pointers don't mustalias. Check this case to see if there is @@ -1045,11 +1154,11 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, GVN &gvn) { - const TargetData &TD = *gvn.getTargetData(); + const DataLayout &DL = SrcVal->getModule()->getDataLayout(); // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to // widen SrcVal out to a larger load. 
- unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType()); - unsigned LoadSize = TD.getTypeStoreSize(LoadTy); + unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType()); + unsigned LoadSize = DL.getTypeStoreSize(LoadTy); if (Offset+LoadSize > SrcValSize) { assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!"); assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load"); @@ -1060,15 +1169,15 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, NewLoadSize = NextPowerOf2(NewLoadSize); Value *PtrVal = SrcVal->getPointerOperand(); - + // Insert the new load after the old load. This ensures that subsequent // memdep queries will find the new load. We can't easily remove the old // load completely because it is already in the value numbering table. IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal)); - Type *DestPTy = + Type *DestPTy = IntegerType::get(LoadTy->getContext(), NewLoadSize*8); - DestPTy = PointerType::get(DestPTy, - cast(PtrVal->getType())->getAddressSpace()); + DestPTy = PointerType::get(DestPTy, + PtrVal->getType()->getPointerAddressSpace()); Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc()); PtrVal = Builder.CreateBitCast(PtrVal, DestPTy); LoadInst *NewLoad = Builder.CreateLoad(PtrVal); @@ -1077,16 +1186,16 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n"); DEBUG(dbgs() << "TO: " << *NewLoad << "\n"); - + // Replace uses of the original load with the wider load. On a big endian // system, we need to shift down to get the relevant bits. Value *RV = NewLoad; - if (TD.isBigEndian()) + if (DL.isBigEndian()) RV = Builder.CreateLShr(RV, NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits()); RV = Builder.CreateTrunc(RV, SrcVal->getType()); SrcVal->replaceAllUsesWith(RV); - + // We would like to use gvn.markInstructionForDeletion here, but we can't // because the load is already memoized into the leader map table that GVN // tracks. It is potentially possible to remove the load from the table, @@ -1095,21 +1204,21 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, gvn.getMemDep().removeInstruction(SrcVal); SrcVal = NewLoad; } - - return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD); + + return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL); } -/// GetMemInstValueForLoad - This function is called when we have a +/// This function is called when we have a /// memdep query of a load that ends up being a clobbering mem intrinsic. static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, - const TargetData &TD){ + const DataLayout &DL){ LLVMContext &Ctx = LoadTy->getContext(); - uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8; + uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8; + + IRBuilder<> Builder(InsertPt); - IRBuilder<> Builder(InsertPt->getParent(), InsertPt); - // We know that this method is only called when the mem transfer fully // provides the bits for the load. if (MemSetInst *MSI = dyn_cast(SrcInst)) { @@ -1118,9 +1227,9 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Value *Val = MSI->getValue(); if (LoadSize != 1) Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8)); - + Value *OneElt = Val; - + // Splat the value out to the right number of bits. for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) { // If we can double the number of bytes set, do it. 
@@ -1130,180 +1239,74 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, NumBytesSet <<= 1; continue; } - + // Otherwise insert one byte at a time. Value *ShVal = Builder.CreateShl(Val, 1*8); Val = Builder.CreateOr(OneElt, ShVal); ++NumBytesSet; } - - return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD); + + return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL); } - + // Otherwise, this is a memcpy/memmove from a constant global. MemTransferInst *MTI = cast(SrcInst); Constant *Src = cast(MTI->getSource()); + unsigned AS = Src->getType()->getPointerAddressSpace(); // Otherwise, see if we can constant fold a load from the constant with the // offset applied as appropriate. Src = ConstantExpr::getBitCast(Src, - llvm::Type::getInt8PtrTy(Src->getContext())); - Constant *OffsetCst = - ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); - Src = ConstantExpr::getGetElementPtr(Src, OffsetCst); - Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy)); - return ConstantFoldLoadFromConstPtr(Src, &TD); + Type::getInt8PtrTy(Src->getContext(), AS)); + Constant *OffsetCst = + ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); + Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src, + OffsetCst); + Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS)); + return ConstantFoldLoadFromConstPtr(Src, DL); } -namespace { - -struct AvailableValueInBlock { - /// BB - The basic block in question. - BasicBlock *BB; - enum ValType { - SimpleVal, // A simple offsetted value that is accessed. - LoadVal, // A value produced by a load. - MemIntrin // A memory intrinsic which is loaded from. - }; - - /// V - The value that is live out of the block. - PointerIntPair Val; - - /// Offset - The byte offset in Val that is interesting for the load query. - unsigned Offset; - - static AvailableValueInBlock get(BasicBlock *BB, Value *V, - unsigned Offset = 0) { - AvailableValueInBlock Res; - Res.BB = BB; - Res.Val.setPointer(V); - Res.Val.setInt(SimpleVal); - Res.Offset = Offset; - return Res; - } - - static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI, - unsigned Offset = 0) { - AvailableValueInBlock Res; - Res.BB = BB; - Res.Val.setPointer(MI); - Res.Val.setInt(MemIntrin); - Res.Offset = Offset; - return Res; - } - - static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI, - unsigned Offset = 0) { - AvailableValueInBlock Res; - Res.BB = BB; - Res.Val.setPointer(LI); - Res.Val.setInt(LoadVal); - Res.Offset = Offset; - return Res; - } - - bool isSimpleValue() const { return Val.getInt() == SimpleVal; } - bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; } - bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; } - - Value *getSimpleValue() const { - assert(isSimpleValue() && "Wrong accessor"); - return Val.getPointer(); - } - - LoadInst *getCoercedLoadValue() const { - assert(isCoercedLoadValue() && "Wrong accessor"); - return cast(Val.getPointer()); - } - - MemIntrinsic *getMemIntrinValue() const { - assert(isMemIntrinValue() && "Wrong accessor"); - return cast(Val.getPointer()); - } - - /// MaterializeAdjustedValue - Emit code into this block to adjust the value - /// defined here to the specified type. This handles various coercion cases. 
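// A standalone model of the splat loop in GetMemInstValueForLoad above: a
// memset stores a single byte pattern, so a LoadSize-byte load out of the
// memset region reads that byte repeated. The loop doubles the number of
// initialized bytes while it can and then appends single bytes, mirroring
// the IR-building version, just on a plain uint64_t.
#include <cstdint>

static uint64_t splatByte(uint8_t Byte, unsigned LoadSizeBytes) {
  uint64_t OneElt = Byte;
  uint64_t Val = OneElt;
  for (unsigned NumBytesSet = 1; NumBytesSet != LoadSizeBytes;) {
    if (NumBytesSet * 2 <= LoadSizeBytes) {      // Double what is set so far.
      Val |= Val << (NumBytesSet * 8);
      NumBytesSet <<= 1;
      continue;
    }
    Val = OneElt | (Val << 8);                   // Otherwise add one byte.
    ++NumBytesSet;
  }
  return Val;
}

int main() {
  // memset(p, 0xAB, n) feeding a 4-byte load yields 0xABABABAB; a 3-byte
  // load yields 0xABABAB.
  return (splatByte(0xAB, 4) == 0xABABABABu &&
          splatByte(0xAB, 3) == 0xABABABu) ? 0 : 1;
}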
- Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const { - Value *Res; - if (isSimpleValue()) { - Res = getSimpleValue(); - if (Res->getType() != LoadTy) { - const TargetData *TD = gvn.getTargetData(); - assert(TD && "Need target data to handle type mismatch case"); - Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), - *TD); - - DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " - << *getSimpleValue() << '\n' - << *Res << '\n' << "\n\n\n"); - } - } else if (isCoercedLoadValue()) { - LoadInst *Load = getCoercedLoadValue(); - if (Load->getType() == LoadTy && Offset == 0) { - Res = Load; - } else { - Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(), - gvn); - - DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " " - << *getCoercedLoadValue() << '\n' - << *Res << '\n' << "\n\n\n"); - } - } else { - const TargetData *TD = gvn.getTargetData(); - assert(TD && "Need target data to handle type mismatch case"); - Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, - LoadTy, BB->getTerminator(), *TD); - DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset - << " " << *getMemIntrinValue() << '\n' - << *Res << '\n' << "\n\n\n"); - } - return Res; - } -}; - -} // end anonymous namespace -/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock, +/// Given a set of loads specified by ValuesPerBlock, /// construct SSA form, allowing us to eliminate LI. This returns the value /// that should be used at LI's definition site. -static Value *ConstructSSAForLoadSet(LoadInst *LI, +static Value *ConstructSSAForLoadSet(LoadInst *LI, SmallVectorImpl &ValuesPerBlock, GVN &gvn) { // Check for the fully redundant, dominating load case. In this case, we can // just use the dominating value directly. - if (ValuesPerBlock.size() == 1 && + if (ValuesPerBlock.size() == 1 && gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB, - LI->getParent())) - return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn); + LI->getParent())) { + assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block"); + return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn); + } // Otherwise, we have to construct SSA form. SmallVector NewPHIs; SSAUpdater SSAUpdate(&NewPHIs); SSAUpdate.Initialize(LI->getType(), LI->getName()); - - Type *LoadTy = LI->getType(); - + for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { const AvailableValueInBlock &AV = ValuesPerBlock[i]; BasicBlock *BB = AV.BB; - + if (SSAUpdate.HasValueForBlock(BB)) continue; - SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn)); + SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn)); } - + // Perform PHI construction. Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent()); - + // If new PHI nodes were created, notify alias analysis. - if (V->getType()->isPointerTy()) { + if (V->getType()->getScalarType()->isPointerTy()) { AliasAnalysis *AA = gvn.getAliasAnalysis(); - + for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) AA->copyValue(LI, NewPHIs[i]); - + // Now that we've copied information to the new PHIs, scan through // them again and inform alias analysis that we've added potentially // escaping uses to any values that are operands to these PHIs. 
@@ -1319,53 +1322,74 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI, return V; } +Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI, + GVN &gvn) const { + Value *Res; + Type *LoadTy = LI->getType(); + const DataLayout &DL = LI->getModule()->getDataLayout(); + if (isSimpleValue()) { + Res = getSimpleValue(); + if (Res->getType() != LoadTy) { + Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL); + + DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " + << *getSimpleValue() << '\n' + << *Res << '\n' << "\n\n\n"); + } + } else if (isCoercedLoadValue()) { + LoadInst *Load = getCoercedLoadValue(); + if (Load->getType() == LoadTy && Offset == 0) { + Res = Load; + } else { + Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(), + gvn); + + DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " " + << *getCoercedLoadValue() << '\n' + << *Res << '\n' << "\n\n\n"); + } + } else if (isMemIntrinValue()) { + Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, + BB->getTerminator(), DL); + DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset + << " " << *getMemIntrinValue() << '\n' + << *Res << '\n' << "\n\n\n"); + } else { + assert(isUndefValue() && "Should be UndefVal"); + DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";); + return UndefValue::get(LoadTy); + } + return Res; +} + static bool isLifetimeStart(const Instruction *Inst) { if (const IntrinsicInst* II = dyn_cast(Inst)) return II->getIntrinsicID() == Intrinsic::lifetime_start; return false; } -/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are -/// non-local by performing PHI construction. -bool GVN::processNonLocalLoad(LoadInst *LI) { - // Find the non-local dependencies of the load. - SmallVector Deps; - AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI); - MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps); - //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: " - // << Deps.size() << *LI << '\n'); - - // If we had to process more than one hundred blocks to find the - // dependencies, this load isn't worth worrying about. Optimizing - // it will be too expensive. - unsigned NumDeps = Deps.size(); - if (NumDeps > 100) - return false; - - // If we had a phi translation failure, we'll have a single entry which is a - // clobber in the current block. Reject this early. - if (NumDeps == 1 && - !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { - DEBUG( - dbgs() << "GVN: non-local load "; - WriteAsOperand(dbgs(), LI); - dbgs() << " has unknown dependencies\n"; - ); - return false; - } +void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, + AvailValInBlkVect &ValuesPerBlock, + UnavailBlkVect &UnavailableBlocks) { // Filter out useless results (non-locals, etc). Keep track of the blocks // where we have a value available in repl, also keep track of whether we see // dependencies that produce an unknown value for the load (such as a call // that could potentially clobber the load). 
- SmallVector ValuesPerBlock; - SmallVector UnavailableBlocks; - + unsigned NumDeps = Deps.size(); + const DataLayout &DL = LI->getModule()->getDataLayout(); for (unsigned i = 0, e = NumDeps; i != e; ++i) { BasicBlock *DepBB = Deps[i].getBB(); MemDepResult DepInfo = Deps[i].getResult(); - if (!DepInfo.isDef() && !DepInfo.isClobber()) { + if (DeadBlocks.count(DepBB)) { + // Dead dependent mem-op disguise as a load evaluating the same value + // as the load in question. + ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); + continue; + } + + if (!DepInfo.isDef() && !DepInfo.isClobber()) { UnavailableBlocks.push_back(DepBB); continue; } @@ -1375,14 +1399,14 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { // the pointer operand of the load if PHI translation occurs. Make sure // to consider the right address. Value *Address = Deps[i].getAddress(); - + // If the dependence is to a store that writes to a superset of the bits // read by the load, we can extract the bits we need for the load from the // stored value. if (StoreInst *DepSI = dyn_cast(DepInfo.getInst())) { - if (TD && Address) { - int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address, - DepSI, *TD); + if (Address) { + int Offset = + AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI); if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, DepSI->getValueOperand(), @@ -1391,7 +1415,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { } } } - + // Check to see if we have something like this: // load i32* P // load i8* (P+1) @@ -1399,11 +1423,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { if (LoadInst *DepLI = dyn_cast(DepInfo.getInst())) { // If this is a clobber and L is the first instruction in its block, then // we have the first instruction in the entry block. - if (DepLI != LI && Address && TD) { - int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), - LI->getPointerOperand(), - DepLI, *TD); - + if (DepLI != LI && Address) { + int Offset = + AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL); + if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI, Offset)); @@ -1415,17 +1438,17 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { // If the clobbering value is a memset/memcpy/memmove, see if we can // forward a value on from it. if (MemIntrinsic *DepMI = dyn_cast(DepInfo.getInst())) { - if (TD && Address) { + if (Address) { int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address, - DepMI, *TD); + DepMI, DL); if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI, Offset)); continue; - } + } } } - + UnavailableBlocks.push_back(DepBB); continue; } @@ -1435,22 +1458,29 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { Instruction *DepInst = DepInfo.getInst(); // Loading the allocation -> undef. - if (isa(DepInst) || isMalloc(DepInst) || + if (isa(DepInst) || isMallocLikeFn(DepInst, TLI) || // Loading immediately after lifetime begin -> undef. isLifetimeStart(DepInst)) { ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, UndefValue::get(LI->getType()))); continue; } - + + // Loading from calloc (which zero initializes memory) -> zero + if (isCallocLikeFn(DepInst, TLI)) { + ValuesPerBlock.push_back(AvailableValueInBlock::get( + DepBB, Constant::getNullValue(LI->getType()))); + continue; + } + if (StoreInst *S = dyn_cast(DepInst)) { // Reject loads and stores that are to the same address but are of // different types if we have to. 
if (S->getValueOperand()->getType() != LI->getType()) { // If the stored value is larger or equal to the loaded value, we can // reuse it. - if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(), - LI->getType(), *TD)) { + if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(), + LI->getType(), DL)) { UnavailableBlocks.push_back(DepBB); continue; } @@ -1460,51 +1490,27 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { S->getValueOperand())); continue; } - + if (LoadInst *LD = dyn_cast(DepInst)) { // If the types mismatch and we can't handle it, reject reuse of the load. if (LD->getType() != LI->getType()) { // If the stored value is larger or equal to the loaded value, we can // reuse it. - if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){ + if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) { UnavailableBlocks.push_back(DepBB); continue; - } + } } ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD)); continue; } - - UnavailableBlocks.push_back(DepBB); - continue; - } - // If we have no predecessors that produce a known value for this load, exit - // early. - if (ValuesPerBlock.empty()) return false; - - // If all of the instructions we depend on produce a known value for this - // load, then it is fully redundant and we can use PHI insertion to compute - // its value. Insert PHIs and remove the fully redundant value now. - if (UnavailableBlocks.empty()) { - DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); - - // Perform PHI construction. - Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); - LI->replaceAllUsesWith(V); - - if (isa(V)) - V->takeName(LI); - if (V->getType()->isPointerTy()) - MD->invalidateCachedPointerInfo(V); - markInstructionForDeletion(LI); - ++NumGVNLoad; - return true; + UnavailableBlocks.push_back(DepBB); } +} - if (!EnablePRE || !EnableLoadPRE) - return false; - +bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, + UnavailBlkVect &UnavailableBlocks) { // Okay, we have *some* definitions of the value. This means that the value // is available in some of our (transitive) predecessors. Lets think about // doing PRE of this load. This will involve inserting a new load into the @@ -1522,19 +1528,16 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { BasicBlock *LoadBB = LI->getParent(); BasicBlock *TmpBB = LoadBB; - bool isSinglePred = false; - bool allSingleSucc = true; while (TmpBB->getSinglePredecessor()) { - isSinglePred = true; TmpBB = TmpBB->getSinglePredecessor(); if (TmpBB == LoadBB) // Infinite (unreachable) loop. return false; if (Blockers.count(TmpBB)) return false; - + // If any of these blocks has more than one successor (i.e. if the edge we - // just traversed was critical), then there are other paths through this - // block along which the load may not be anticipated. Hoisting the load + // just traversed was critical), then there are other paths through this + // block along which the load may not be anticipated. Hoisting the load // above this block would be adding the load to execution paths along // which it was not previously executed. if (TmpBB->getTerminator()->getNumSuccessors() != 1) @@ -1544,45 +1547,22 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { assert(TmpBB); LoadBB = TmpBB; - // FIXME: It is extremely unclear what this loop is doing, other than - // artificially restricting loadpre. 
- if (isSinglePred) { - bool isHot = false; - for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { - const AvailableValueInBlock &AV = ValuesPerBlock[i]; - if (AV.isSimpleValue()) - // "Hot" Instruction is in some loop (because it dominates its dep. - // instruction). - if (Instruction *I = dyn_cast(AV.getSimpleValue())) - if (DT->dominates(LI, I)) { - isHot = true; - break; - } - } - - // We are interested only in "hot" instructions. We don't want to do any - // mis-optimizations here. - if (!isHot) - return false; - } - // Check to see how many predecessors have the loaded value fully // available. - DenseMap PredLoads; + MapVector PredLoads; DenseMap FullyAvailableBlocks; for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) FullyAvailableBlocks[ValuesPerBlock[i].BB] = true; for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) FullyAvailableBlocks[UnavailableBlocks[i]] = false; - SmallVector, 4> NeedToSplit; + SmallVector CriticalEdgePred; for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E; ++PI) { BasicBlock *Pred = *PI; if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) { continue; } - PredLoads[Pred] = 0; if (Pred->getTerminator()->getNumSuccessors() != 1) { if (isa(Pred->getTerminator())) { @@ -1598,21 +1578,18 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { return false; } - unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB); - NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum)); + CriticalEdgePred.push_back(Pred); + } else { + // Only add the predecessors that will not be split for now. + PredLoads[Pred] = nullptr; } } - if (!NeedToSplit.empty()) { - toSplit.append(NeedToSplit.begin(), NeedToSplit.end()); - return false; - } - // Decide whether PRE is profitable for this load. - unsigned NumUnavailablePreds = PredLoads.size(); + unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size(); assert(NumUnavailablePreds != 0 && - "Fully available value should be eliminated above!"); - + "Fully available value should already be eliminated!"); + // If this load is unavailable in multiple predecessors, reject it. // FIXME: If we could restructure the CFG, we could make a common pred with // all the preds that don't have an available LI and insert a new load into @@ -1620,12 +1597,21 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { if (NumUnavailablePreds != 1) return false; + // Split critical edges, and update the unavailable predecessors accordingly. + for (BasicBlock *OrigPred : CriticalEdgePred) { + BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB); + assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!"); + PredLoads[NewPred] = nullptr; + DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->" + << LoadBB->getName() << '\n'); + } + // Check if the load can safely be moved to all the unavailable predecessors. bool CanDoPRE = true; + const DataLayout &DL = LI->getModule()->getDataLayout(); SmallVector NewInsts; - for (DenseMap::iterator I = PredLoads.begin(), - E = PredLoads.end(); I != E; ++I) { - BasicBlock *UnavailablePred = I->first; + for (auto &PredLoad : PredLoads) { + BasicBlock *UnavailablePred = PredLoad.first; // Do PHI translation to get its value in the predecessor if necessary. The // returned pointer (if non-null) is guaranteed to dominate UnavailablePred. 
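// A standalone sketch of the two checks that gate load PRE above: an edge
// into the load's block is critical when its source has several successors
// (splitCriticalEdges puts a new block on such an edge before any code is
// inserted), and PRE is only attempted when exactly one predecessor still
// lacks the value. A tiny adjacency-list CFG stands in for LLVM's
// BasicBlocks; the names here are illustrative.
#include <map>
#include <set>
#include <string>
#include <vector>

struct MiniCFG {
  std::map<std::string, std::vector<std::string>> Succs;

  std::vector<std::string> preds(const std::string &BB) const {
    std::vector<std::string> P;
    for (const auto &Node : Succs)
      for (const std::string &S : Node.second)
        if (S == BB)
          P.push_back(Node.first);
    return P;
  }

  // An edge Pred->Succ is critical if Pred has other successors and Succ has
  // other predecessors; code meant for "the edge" then needs its own block.
  bool isCriticalEdge(const std::string &Pred, const std::string &Succ) const {
    return Succs.at(Pred).size() > 1 && preds(Succ).size() > 1;
  }
};

// PRE of a load in LoadBB is worthwhile only if exactly one predecessor is
// missing the value: the reload goes there and a PHI merges the rest.
static bool shouldPRELoad(const MiniCFG &CFG, const std::string &LoadBB,
                          const std::set<std::string> &AvailableIn) {
  unsigned Unavailable = 0;
  for (const std::string &P : CFG.preds(LoadBB))
    if (!AvailableIn.count(P))
      ++Unavailable;
  return Unavailable == 1;
}

int main() {
  MiniCFG CFG;
  CFG.Succs = {{"entry", {"then", "merge"}},
               {"then", {"merge"}},
               {"merge", {}}};
  // entry->merge is critical (entry also branches to "then" and merge has
  // two predecessors); then->merge is not.
  bool OK = CFG.isCriticalEdge("entry", "merge") &&
            !CFG.isCriticalEdge("then", "merge") &&
            shouldPRELoad(CFG, "merge", {"then"});   // only "entry" lacks it
  return OK ? 0 : 1;
}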
@@ -1633,44 +1619,21 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { // If all preds have a single successor, then we know it is safe to insert // the load on the pred (?!?), so we can insert code to materialize the // pointer if it is not available. - PHITransAddr Address(LI->getPointerOperand(), TD); - Value *LoadPtr = 0; - if (allSingleSucc) { - LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, - *DT, NewInsts); - } else { - Address.PHITranslateValue(LoadBB, UnavailablePred, DT); - LoadPtr = Address.getAddr(); - } + PHITransAddr Address(LI->getPointerOperand(), DL, AC); + Value *LoadPtr = nullptr; + LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, + *DT, NewInsts); // If we couldn't find or insert a computation of this phi translated value, // we fail PRE. - if (LoadPtr == 0) { + if (!LoadPtr) { DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " << *LI->getPointerOperand() << "\n"); CanDoPRE = false; break; } - // Make sure it is valid to move this load here. We have to watch out for: - // @1 = getelementptr (i8* p, ... - // test p and branch if == 0 - // load @1 - // It is valid to have the getelementptr before the test, even if p can - // be 0, as getelementptr only does address arithmetic. - // If we are not pushing the value through any multiple-successor blocks - // we do not have this case. Otherwise, check that the load is safe to - // put anywhere; this can be improved, but should be conservatively safe. - if (!allSingleSucc && - // FIXME: REEVALUTE THIS. - !isSafeToLoadUnconditionally(LoadPtr, - UnavailablePred->getTerminator(), - LI->getAlignment(), TD)) { - CanDoPRE = false; - break; - } - - I->second = LoadPtr; + PredLoad.second = LoadPtr; } if (!CanDoPRE) { @@ -1679,7 +1642,9 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { if (MD) MD->removeInstruction(I); I->eraseFromParent(); } - return false; + // HINT: Don't revert the edge-splitting as following transformation may + // also need to split these critical edges. + return !CriticalEdgePred.empty(); } // Okay, we can eliminate this load by inserting a reload in the predecessor @@ -1689,28 +1654,29 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back() << '\n'); - + // Assign value numbers to the new instructions. for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) { - // FIXME: We really _ought_ to insert these value numbers into their + // FIXME: We really _ought_ to insert these value numbers into their // parent's availability map. However, in doing so, we risk getting into // ordering issues. If a block hasn't been processed yet, we would be // marking a value as AVAIL-IN, which isn't what we intend. VN.lookup_or_add(NewInsts[i]); } - for (DenseMap::iterator I = PredLoads.begin(), - E = PredLoads.end(); I != E; ++I) { - BasicBlock *UnavailablePred = I->first; - Value *LoadPtr = I->second; + for (const auto &PredLoad : PredLoads) { + BasicBlock *UnavailablePred = PredLoad.first; + Value *LoadPtr = PredLoad.second; Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, LI->getAlignment(), UnavailablePred->getTerminator()); - // Transfer the old load's TBAA tag to the new load. - if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) - NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag); + // Transfer the old load's AA tags to the new load. + AAMDNodes Tags; + LI->getAAMetadata(Tags); + if (Tags) + NewLoad->setAAMetadata(Tags); // Transfer DebugLoc. 
NewLoad->setDebugLoc(LI->getDebugLoc()); @@ -1727,14 +1693,131 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { LI->replaceAllUsesWith(V); if (isa(V)) V->takeName(LI); - if (V->getType()->isPointerTy()) + if (Instruction *I = dyn_cast(V)) + I->setDebugLoc(LI->getDebugLoc()); + if (V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(LI); ++NumPRELoad; return true; } -/// processLoad - Attempt to eliminate a load, first by eliminating it +/// Attempt to eliminate a load whose dependencies are +/// non-local by performing PHI construction. +bool GVN::processNonLocalLoad(LoadInst *LI) { + // Step 1: Find the non-local dependencies of the load. + LoadDepVect Deps; + MD->getNonLocalPointerDependency(LI, Deps); + + // If we had to process more than one hundred blocks to find the + // dependencies, this load isn't worth worrying about. Optimizing + // it will be too expensive. + unsigned NumDeps = Deps.size(); + if (NumDeps > 100) + return false; + + // If we had a phi translation failure, we'll have a single entry which is a + // clobber in the current block. Reject this early. + if (NumDeps == 1 && + !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { + DEBUG( + dbgs() << "GVN: non-local load "; + LI->printAsOperand(dbgs()); + dbgs() << " has unknown dependencies\n"; + ); + return false; + } + + // If this load follows a GEP, see if we can PRE the indices before analyzing. + if (GetElementPtrInst *GEP = dyn_cast(LI->getOperand(0))) { + for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(), + OE = GEP->idx_end(); + OI != OE; ++OI) + if (Instruction *I = dyn_cast(OI->get())) + performScalarPRE(I); + } + + // Step 2: Analyze the availability of the load + AvailValInBlkVect ValuesPerBlock; + UnavailBlkVect UnavailableBlocks; + AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks); + + // If we have no predecessors that produce a known value for this load, exit + // early. + if (ValuesPerBlock.empty()) + return false; + + // Step 3: Eliminate fully redundancy. + // + // If all of the instructions we depend on produce a known value for this + // load, then it is fully redundant and we can use PHI insertion to compute + // its value. Insert PHIs and remove the fully redundant value now. + if (UnavailableBlocks.empty()) { + DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); + + // Perform PHI construction. + Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); + LI->replaceAllUsesWith(V); + + if (isa(V)) + V->takeName(LI); + if (Instruction *I = dyn_cast(V)) + I->setDebugLoc(LI->getDebugLoc()); + if (V->getType()->getScalarType()->isPointerTy()) + MD->invalidateCachedPointerInfo(V); + markInstructionForDeletion(LI); + ++NumGVNLoad; + return true; + } + + // Step 4: Eliminate partial redundancy. + if (!EnablePRE || !EnableLoadPRE) + return false; + + return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks); +} + + +static void patchReplacementInstruction(Instruction *I, Value *Repl) { + // Patch the replacement so that it is not more restrictive than the value + // being replaced. 
+ BinaryOperator *Op = dyn_cast(I); + BinaryOperator *ReplOp = dyn_cast(Repl); + if (Op && ReplOp && isa(Op) && + isa(ReplOp)) { + if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap()) + ReplOp->setHasNoSignedWrap(false); + if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap()) + ReplOp->setHasNoUnsignedWrap(false); + } + if (Instruction *ReplInst = dyn_cast(Repl)) { + // FIXME: If both the original and replacement value are part of the + // same control-flow region (meaning that the execution of one + // guarentees the executation of the other), then we can combine the + // noalias scopes here and do better than the general conservative + // answer used in combineMetadata(). + + // In general, GVN unifies expressions over different control-flow + // regions, and so we need a conservative combination of the noalias + // scopes. + unsigned KnownIDs[] = { + LLVMContext::MD_tbaa, + LLVMContext::MD_alias_scope, + LLVMContext::MD_noalias, + LLVMContext::MD_range, + LLVMContext::MD_fpmath, + LLVMContext::MD_invariant_load, + }; + combineMetadata(ReplInst, I, KnownIDs); + } +} + +static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { + patchReplacementInstruction(I, Repl); + I->replaceAllUsesWith(Repl); +} + +/// Attempt to eliminate a load, first by eliminating it /// locally, and then attempting non-local elimination if that fails. bool GVN::processLoad(LoadInst *L) { if (!MD) @@ -1747,13 +1830,14 @@ bool GVN::processLoad(LoadInst *L) { markInstructionForDeletion(L); return true; } - + // ... to a pointer that has been loaded from before... MemDepResult Dep = MD->getDependency(L); + const DataLayout &DL = L->getModule()->getDataLayout(); // If we have a clobber and target data is around, see if this is a clobber // that we can fix up through code synthesis. - if (Dep.isClobber() && TD) { + if (Dep.isClobber()) { // Check to see if we have something like this: // store i32 123, i32* %P // %A = bitcast i32* %P to i8* @@ -1764,16 +1848,15 @@ bool GVN::processLoad(LoadInst *L) { // a common base + constant offset, and if the previous store (or memset) // completely covers this load. This sort of thing can happen in bitfield // access code. - Value *AvailVal = 0; + Value *AvailVal = nullptr; if (StoreInst *DepSI = dyn_cast(Dep.getInst())) { - int Offset = AnalyzeLoadFromClobberingStore(L->getType(), - L->getPointerOperand(), - DepSI, *TD); + int Offset = AnalyzeLoadFromClobberingStore( + L->getType(), L->getPointerOperand(), DepSI); if (Offset != -1) AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset, - L->getType(), L, *TD); + L->getType(), L, DL); } - + // Check to see if we have something like this: // load i32* P // load i8* (P+1) @@ -1783,44 +1866,42 @@ bool GVN::processLoad(LoadInst *L) { // we have the first instruction in the entry block. if (DepLI == L) return false; - - int Offset = AnalyzeLoadFromClobberingLoad(L->getType(), - L->getPointerOperand(), - DepLI, *TD); + + int Offset = AnalyzeLoadFromClobberingLoad( + L->getType(), L->getPointerOperand(), DepLI, DL); if (Offset != -1) AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this); } - + // If the clobbering value is a memset/memcpy/memmove, see if we can forward // a value on from it. 
if (MemIntrinsic *DepMI = dyn_cast(Dep.getInst())) { - int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(), - L->getPointerOperand(), - DepMI, *TD); + int Offset = AnalyzeLoadFromClobberingMemInst( + L->getType(), L->getPointerOperand(), DepMI, DL); if (Offset != -1) - AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD); + AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL); } - + if (AvailVal) { DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n' << *AvailVal << '\n' << *L << "\n\n\n"); - + // Replace the load! L->replaceAllUsesWith(AvailVal); - if (AvailVal->getType()->isPointerTy()) + if (AvailVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(AvailVal); markInstructionForDeletion(L); ++NumGVNLoad; return true; } } - + // If the value isn't available, don't do anything! if (Dep.isClobber()) { DEBUG( // fast print dep, using operator<< on instruction is too slow. dbgs() << "GVN: load "; - WriteAsOperand(dbgs(), L); + L->printAsOperand(dbgs()); Instruction *I = Dep.getInst(); dbgs() << " is clobbered by " << *I << '\n'; ); @@ -1835,7 +1916,7 @@ bool GVN::processLoad(LoadInst *L) { DEBUG( // fast print dep, using operator<< on instruction is too slow. dbgs() << "GVN: load "; - WriteAsOperand(dbgs(), L); + L->printAsOperand(dbgs()); dbgs() << " has unknown dependence\n"; ); return false; @@ -1844,27 +1925,24 @@ bool GVN::processLoad(LoadInst *L) { Instruction *DepInst = Dep.getInst(); if (StoreInst *DepSI = dyn_cast(DepInst)) { Value *StoredVal = DepSI->getValueOperand(); - + // The store and load are to a must-aliased pointer, but they may not // actually have the same type. See if we know how to reuse the stored // value (depending on its type). if (StoredVal->getType() != L->getType()) { - if (TD) { - StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(), - L, *TD); - if (StoredVal == 0) - return false; - - DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal - << '\n' << *L << "\n\n\n"); - } - else + IRBuilder<> Builder(L); + StoredVal = + CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL); + if (!StoredVal) return false; + + DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal + << '\n' << *L << "\n\n\n"); } // Remove it! L->replaceAllUsesWith(StoredVal); - if (StoredVal->getType()->isPointerTy()) + if (StoredVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(StoredVal); markInstructionForDeletion(L); ++NumGVNLoad; @@ -1873,27 +1951,24 @@ bool GVN::processLoad(LoadInst *L) { if (LoadInst *DepLI = dyn_cast(DepInst)) { Value *AvailableVal = DepLI; - + // The loads are of a must-aliased pointer, but they may not actually have // the same type. See if we know how to reuse the previously loaded value // (depending on its type). if (DepLI->getType() != L->getType()) { - if (TD) { - AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), - L, *TD); - if (AvailableVal == 0) - return false; - - DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal - << "\n" << *L << "\n\n\n"); - } - else + IRBuilder<> Builder(L); + AvailableVal = + CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL); + if (!AvailableVal) return false; + + DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal + << "\n" << *L << "\n\n\n"); } - + // Remove it! 
- L->replaceAllUsesWith(AvailableVal); - if (DepLI->getType()->isPointerTy()) + patchAndReplaceAllUsesWith(L, AvailableVal); + if (DepLI->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(DepLI); markInstructionForDeletion(L); ++NumGVNLoad; @@ -1903,13 +1978,13 @@ bool GVN::processLoad(LoadInst *L) { // If this load really doesn't depend on anything, then we must be loading an // undef value. This can happen when loading for a fresh allocation with no // intervening stores, for example. - if (isa(DepInst) || isMalloc(DepInst)) { + if (isa(DepInst) || isMallocLikeFn(DepInst, TLI)) { L->replaceAllUsesWith(UndefValue::get(L->getType())); markInstructionForDeletion(L); ++NumGVNLoad; return true; } - + // If this load occurs either right after a lifetime begin, // then the loaded value is undefined. if (IntrinsicInst *II = dyn_cast(DepInst)) { @@ -1921,71 +1996,74 @@ bool GVN::processLoad(LoadInst *L) { } } + // If this load follows a calloc (which zero initializes memory), + // then the loaded value is zero + if (isCallocLikeFn(DepInst, TLI)) { + L->replaceAllUsesWith(Constant::getNullValue(L->getType())); + markInstructionForDeletion(L); + ++NumGVNLoad; + return true; + } + return false; } -// findLeader - In order to find a leader for a given value number at a +// In order to find a leader for a given value number at a // specific basic block, we first obtain the list of all Values for that number, -// and then scan the list to find one whose block dominates the block in +// and then scan the list to find one whose block dominates the block in // question. This is fast because dominator tree queries consist of only // a few comparisons of DFS numbers. -Value *GVN::findLeader(BasicBlock *BB, uint32_t num) { +Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) { LeaderTableEntry Vals = LeaderTable[num]; - if (!Vals.Val) return 0; - - Value *Val = 0; + if (!Vals.Val) return nullptr; + + Value *Val = nullptr; if (DT->dominates(Vals.BB, BB)) { Val = Vals.Val; if (isa(Val)) return Val; } - + LeaderTableEntry* Next = Vals.Next; while (Next) { if (DT->dominates(Next->BB, BB)) { if (isa(Next->Val)) return Next->Val; if (!Val) Val = Next->Val; } - + Next = Next->Next; } return Val; } -/// replaceAllDominatedUsesWith - Replace all uses of 'From' with 'To' if the -/// use is dominated by the given basic block. Returns the number of uses that -/// were replaced. -unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To, - BasicBlock *Root) { - unsigned Count = 0; - for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); - UI != UE; ) { - Use &U = (UI++).getUse(); - - // If From occurs as a phi node operand then the use implicitly lives in the - // corresponding incoming block. Otherwise it is the block containing the - // user that must be dominated by Root. - BasicBlock *UsingBlock; - if (PHINode *PN = dyn_cast(U.getUser())) - UsingBlock = PN->getIncomingBlock(U); - else - UsingBlock = cast(U.getUser())->getParent(); - - if (DT->dominates(Root, UsingBlock)) { - U.set(To); - ++Count; - } - } - return Count; +/// There is an edge from 'Src' to 'Dst'. Return +/// true if every path from the entry block to 'Dst' passes via this edge. In +/// particular 'Dst' must not be reachable via another edge from 'Src'. 
+static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, + DominatorTree *DT) { + // While in theory it is interesting to consider the case in which Dst has + // more than one predecessor, because Dst might be part of a loop which is + // only reachable from Src, in practice it is pointless since at the time + // GVN runs all such loops have preheaders, which means that Dst will have + // been changed to have only one predecessor, namely Src. + const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); + const BasicBlock *Src = E.getStart(); + assert((!Pred || Pred == Src) && "No edge between these basic blocks!"); + (void)Src; + return Pred != nullptr; } -/// propagateEquality - The given values are known to be equal in every block +/// The given values are known to be equal in every block /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with /// 'RHS' everywhere in the scope. Returns whether a change was made. -bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { +bool GVN::propagateEquality(Value *LHS, Value *RHS, + const BasicBlockEdge &Root) { SmallVector, 4> Worklist; Worklist.push_back(std::make_pair(LHS, RHS)); bool Changed = false; + // For speed, compute a conservative fast approximation to + // DT->dominates(Root, Root.getEnd()); + bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); while (!Worklist.empty()) { std::pair Item = Worklist.pop_back_val(); @@ -2002,24 +2080,21 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { std::swap(LHS, RHS); assert((isa(LHS) || isa(LHS)) && "Unexpected value!"); - // If there is no obvious reason to prefer the left-hand side over the right- - // hand side, ensure the longest lived term is on the right-hand side, so the - // shortest lived term will be replaced by the longest lived. This tends to - // expose more simplifications. + // If there is no obvious reason to prefer the left-hand side over the + // right-hand side, ensure the longest lived term is on the right-hand side, + // so the shortest lived term will be replaced by the longest lived. + // This tends to expose more simplifications. uint32_t LVN = VN.lookup_or_add(LHS); if ((isa(LHS) && isa(RHS)) || (isa(LHS) && isa(RHS))) { - // Move the 'oldest' value to the right-hand side, using the value number as - // a proxy for age. + // Move the 'oldest' value to the right-hand side, using the value number + // as a proxy for age. uint32_t RVN = VN.lookup_or_add(RHS); if (LVN < RVN) { std::swap(LHS, RHS); LVN = RVN; } } - assert((!isa(RHS) || - DT->properlyDominates(cast(RHS)->getParent(), Root)) && - "Instruction doesn't dominate scope!"); // If value numbering later sees that an instruction in the scope is equal // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve @@ -2028,22 +2103,24 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { // if RHS is an instruction (if an instruction in the scope is morphed into // LHS then it will be turned into RHS by the next GVN iteration anyway, so // using the leader table is about compiling faster, not optimizing better). - if (!isa(RHS)) - addToLeaderTable(LVN, RHS, Root); + // The leader table only tracks basic blocks, not edges. Only add to if we + // have the simple case where the edge dominates the end. + if (RootDominatesEnd && !isa(RHS)) + addToLeaderTable(LVN, RHS, Root.getEnd()); // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. 
As // LHS always has at least one use that is not dominated by Root, this will // never do anything if LHS has only one use. if (!LHS->hasOneUse()) { - unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root); + unsigned NumReplacements = replaceDominatedUsesWith(LHS, RHS, *DT, Root); Changed |= NumReplacements > 0; NumGVNEqProp += NumReplacements; } - // Now try to deduce additional equalities from this one. For example, if the - // known equality was "(A != B)" == "false" then it follows that A and B are - // equal in the scope. Only boolean equalities with an explicit true or false - // RHS are currently supported. + // Now try to deduce additional equalities from this one. For example, if + // the known equality was "(A != B)" == "false" then it follows that A and B + // are equal in the scope. Only boolean equalities with an explicit true or + // false RHS are currently supported. if (!RHS->getType()->isIntegerTy(1)) // Not a boolean equality - bail out. continue; @@ -2068,7 +2145,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { // If we are propagating an equality like "(A == B)" == "true" then also // propagate the equality A == B. When propagating a comparison such as // "(A >= B)" == "true", replace all instances of "A < B" with "false". - if (ICmpInst *Cmp = dyn_cast(LHS)) { + if (CmpInst *Cmp = dyn_cast(LHS)) { Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); // If "A == B" is known true, or "A != B" is known false, then replace @@ -2077,28 +2154,47 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) Worklist.push_back(std::make_pair(Op0, Op1)); + // Handle the floating point versions of equality comparisons too. + if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) || + (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) { + + // Floating point -0.0 and 0.0 compare equal, so we can only + // propagate values if we know that we have a constant and that + // its value is non-zero. + + // FIXME: We should do this optimization if 'no signed zeros' is + // applicable via an instruction-level fast-math-flag or some other + // indicator that relaxed FP semantics are being used. + + if (isa(Op1) && !cast(Op1)->isZero()) + Worklist.push_back(std::make_pair(Op0, Op1)); + } + // If "A >= B" is known true, replace "A < B" with false everywhere. CmpInst::Predicate NotPred = Cmp->getInversePredicate(); Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); - // Since we don't have the instruction "A < B" immediately to hand, work out - // the value number that it would have and use that to find an appropriate - // instruction (if any). + // Since we don't have the instruction "A < B" immediately to hand, work + // out the value number that it would have and use that to find an + // appropriate instruction (if any). uint32_t NextNum = VN.getNextUnusedValueNumber(); uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1); // If the number we were assigned was brand new then there is no point in // looking for an instruction realizing it: there cannot be one! 
if (Num < NextNum) { - Value *NotCmp = findLeader(Root, Num); + Value *NotCmp = findLeader(Root.getEnd(), Num); if (NotCmp && isa(NotCmp)) { unsigned NumReplacements = - replaceAllDominatedUsesWith(NotCmp, NotVal, Root); + replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root); Changed |= NumReplacements > 0; NumGVNEqProp += NumReplacements; } } // Ensure that any instruction in scope that gets the "A < B" value number // is replaced with false. - addToLeaderTable(Num, NotVal, Root); + // The leader table only tracks basic blocks, not edges. Only add to if we + // have the simple case where the edge dominates the end. + if (RootDominatesEnd) + addToLeaderTable(Num, NotVal, Root.getEnd()); continue; } @@ -2107,23 +2203,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { return Changed; } -/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return -/// true if every path from the entry block to 'Dst' passes via this edge. In -/// particular 'Dst' must not be reachable via another edge from 'Src'. -static bool isOnlyReachableViaThisEdge(BasicBlock *Src, BasicBlock *Dst, - DominatorTree *DT) { - // While in theory it is interesting to consider the case in which Dst has - // more than one predecessor, because Dst might be part of a loop which is - // only reachable from Src, in practice it is pointless since at the time - // GVN runs all such loops have preheaders, which means that Dst will have - // been changed to have only one predecessor, namely Src. - BasicBlock *Pred = Dst->getSinglePredecessor(); - assert((!Pred || Pred == Src) && "No edge between these basic blocks!"); - (void)Src; - return Pred != 0; -} - -/// processInstruction - When calculating availability, handle an instruction +/// When calculating availability, handle an instruction /// by inserting it into the appropriate sets bool GVN::processInstruction(Instruction *I) { // Ignore dbg info intrinsics. @@ -2134,9 +2214,10 @@ bool GVN::processInstruction(Instruction *I) { // to value numbering it. Value numbering often exposes redundancies, for // example if it determines that %y is equal to %x then the instruction // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. - if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) { + const DataLayout &DL = I->getModule()->getDataLayout(); + if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) { I->replaceAllUsesWith(V); - if (MD && V->getType()->isPointerTy()) + if (MD && V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(I); ++NumGVNSimpl; @@ -2155,25 +2236,29 @@ bool GVN::processInstruction(Instruction *I) { // For conditional branches, we can perform simple conditional propagation on // the condition value itself. if (BranchInst *BI = dyn_cast(I)) { - if (!BI->isConditional() || isa(BI->getCondition())) + if (!BI->isConditional()) return false; - Value *BranchCond = BI->getCondition(); + if (isa(BI->getCondition())) + return processFoldableCondBr(BI); + Value *BranchCond = BI->getCondition(); BasicBlock *TrueSucc = BI->getSuccessor(0); BasicBlock *FalseSucc = BI->getSuccessor(1); + // Avoid multiple edges early. 
+ if (TrueSucc == FalseSucc) + return false; + BasicBlock *Parent = BI->getParent(); bool Changed = false; - if (isOnlyReachableViaThisEdge(Parent, TrueSucc, DT)) - Changed |= propagateEquality(BranchCond, - ConstantInt::getTrue(TrueSucc->getContext()), - TrueSucc); + Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); + BasicBlockEdge TrueE(Parent, TrueSucc); + Changed |= propagateEquality(BranchCond, TrueVal, TrueE); - if (isOnlyReachableViaThisEdge(Parent, FalseSucc, DT)) - Changed |= propagateEquality(BranchCond, - ConstantInt::getFalse(FalseSucc->getContext()), - FalseSucc); + Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); + BasicBlockEdge FalseE(Parent, FalseSucc); + Changed |= propagateEquality(BranchCond, FalseVal, FalseE); return Changed; } @@ -2183,11 +2268,20 @@ bool GVN::processInstruction(Instruction *I) { Value *SwitchCond = SI->getCondition(); BasicBlock *Parent = SI->getParent(); bool Changed = false; + + // Remember how many outgoing edges there are to every successor. + SmallDenseMap SwitchEdges; + for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) + ++SwitchEdges[SI->getSuccessor(i)]; + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { BasicBlock *Dst = i.getCaseSuccessor(); - if (isOnlyReachableViaThisEdge(Parent, Dst, DT)) - Changed |= propagateEquality(SwitchCond, i.getCaseValue(), Dst); + // If there is only a single edge, propagate the case value into it. + if (SwitchEdges.lookup(Dst) == 1) { + BasicBlockEdge E(Parent, Dst); + Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E); + } } return Changed; } @@ -2195,7 +2289,7 @@ bool GVN::processInstruction(Instruction *I) { // Instructions with void type don't return a value, so there's // no point in trying to find redundancies in them. if (I->getType()->isVoidTy()) return false; - + uint32_t NextNum = VN.getNextUnusedValueNumber(); unsigned Num = VN.lookup_or_add(I); @@ -2213,19 +2307,19 @@ bool GVN::processInstruction(Instruction *I) { addToLeaderTable(Num, I, I->getParent()); return false; } - + // Perform fast-path value-number based elimination of values inherited from // dominators. Value *repl = findLeader(I->getParent(), Num); - if (repl == 0) { + if (!repl) { // Failure, just remember this instance for future use. addToLeaderTable(Num, I, I->getParent()); return false; } - + // Remove it! - I->replaceAllUsesWith(repl); - if (MD && repl->getType()->isPointerTy()) + patchAndReplaceAllUsesWith(I, repl); + if (MD && repl->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(repl); markInstructionForDeletion(I); return true; @@ -2233,11 +2327,14 @@ bool GVN::processInstruction(Instruction *I) { /// runOnFunction - This is the main transformation entry point for a function. bool GVN::runOnFunction(Function& F) { + if (skipOptnoneFunction(F)) + return false; + if (!NoLoads) MD = &getAnalysis(); - DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); - TLI = &getAnalysis(); + DT = &getAnalysis().getDomTree(); + AC = &getAnalysis().getAssumptionCache(F); + TLI = &getAnalysis().getTLI(); VN.setAliasAnalysis(&getAnalysis()); VN.setMemDep(MD); VN.setDomTree(DT); @@ -2249,8 +2346,9 @@ bool GVN::runOnFunction(Function& F) { // optimization opportunities. 
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { BasicBlock *BB = FI++; - - bool removedBlock = MergeBlockIntoPredecessor(BB, this); + + bool removedBlock = MergeBlockIntoPredecessor( + BB, DT, /* LoopInfo */ nullptr, VN.getAliasAnalysis(), MD); if (removedBlock) ++NumGVNBlocks; Changed |= removedBlock; @@ -2260,25 +2358,30 @@ bool GVN::runOnFunction(Function& F) { while (ShouldContinue) { DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); ShouldContinue = iterateOnFunction(F); - if (splitCriticalEdges()) - ShouldContinue = true; Changed |= ShouldContinue; ++Iteration; } if (EnablePRE) { + // Fabricate val-num for dead-code in order to suppress assertion in + // performPRE(). + assignValNumForDeadCode(); bool PREChanged = true; while (PREChanged) { PREChanged = performPRE(F); Changed |= PREChanged; } } + // FIXME: Should perform GVN again after PRE does something. PRE can move // computations into blocks where they become fully redundant. Note that // we can't do this until PRE's critical edge splitting updates memdep. // Actually, when this happens, we should just fully integrate PRE into GVN. cleanupGlobalSets(); + // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each + // iteration. + DeadBlocks.clear(); return Changed; } @@ -2289,6 +2392,9 @@ bool GVN::processBlock(BasicBlock *BB) { // (and incrementing BI before processing an instruction). assert(InstrsToErase.empty() && "We expect InstrsToErase to be empty across iterations"); + if (DeadBlocks.count(BB)) + return false; + bool ChangedFunction = false; for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); @@ -2307,12 +2413,12 @@ bool GVN::processBlock(BasicBlock *BB) { if (!AtStart) --BI; - for (SmallVector::iterator I = InstrsToErase.begin(), + for (SmallVectorImpl::iterator I = InstrsToErase.begin(), E = InstrsToErase.end(); I != E; ++I) { DEBUG(dbgs() << "GVN removed: " << **I << '\n'); if (MD) MD->removeInstruction(*I); - (*I)->eraseFromParent(); DEBUG(verifyRemoved(*I)); + (*I)->eraseFromParent(); } InstrsToErase.clear(); @@ -2325,175 +2431,204 @@ bool GVN::processBlock(BasicBlock *BB) { return ChangedFunction; } -/// performPRE - Perform a purely local form of PRE that looks for diamond -/// control flow patterns and attempts to perform simple PRE at the join point. -bool GVN::performPRE(Function &F) { - bool Changed = false; - DenseMap predMap; - for (df_iterator DI = df_begin(&F.getEntryBlock()), - DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { - BasicBlock *CurrentBlock = *DI; +// Instantiate an expression in a predecessor that lacked it. +bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, + unsigned int ValNo) { + // Because we are going top-down through the block, all value numbers + // will be available in the predecessor by the time we need them. Any + // that weren't originally present will have been instantiated earlier + // in this loop. + bool success = true; + for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { + Value *Op = Instr->getOperand(i); + if (isa(Op) || isa(Op) || isa(Op)) + continue; - // Nothing to PRE in the entry block. - if (CurrentBlock == &F.getEntryBlock()) continue; + if (Value *V = findLeader(Pred, VN.lookup(Op))) { + Instr->setOperand(i, V); + } else { + success = false; + break; + } + } - // Don't perform PRE on a landing pad. - if (CurrentBlock->isLandingPad()) continue; + // Fail out if we encounter an operand that is not available in + // the PRE predecessor. 
This is typically because of loads which + // are not value numbered precisely. + if (!success) + return false; - for (BasicBlock::iterator BI = CurrentBlock->begin(), - BE = CurrentBlock->end(); BI != BE; ) { - Instruction *CurInst = BI++; + Instr->insertBefore(Pred->getTerminator()); + Instr->setName(Instr->getName() + ".pre"); + Instr->setDebugLoc(Instr->getDebugLoc()); + VN.add(Instr, ValNo); - if (isa(CurInst) || - isa(CurInst) || isa(CurInst) || - CurInst->getType()->isVoidTy() || - CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || - isa(CurInst)) - continue; + // Update the availability map to include the new instruction. + addToLeaderTable(ValNo, Instr, Pred); + return true; +} - // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from - // sinking the compare again, and it would force the code generator to - // move the i1 from processor flags or predicate registers into a general - // purpose register. - if (isa(CurInst)) - continue; +bool GVN::performScalarPRE(Instruction *CurInst) { + SmallVector, 8> predMap; - // We don't currently value number ANY inline asm calls. - if (CallInst *CallI = dyn_cast(CurInst)) - if (CallI->isInlineAsm()) - continue; + if (isa(CurInst) || isa(CurInst) || + isa(CurInst) || CurInst->getType()->isVoidTy() || + CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || + isa(CurInst)) + return false; - uint32_t ValNo = VN.lookup(CurInst); - - // Look for the predecessors for PRE opportunities. We're - // only trying to solve the basic diamond case, where - // a value is computed in the successor and one predecessor, - // but not the other. We also explicitly disallow cases - // where the successor is its own predecessor, because they're - // more complicated to get right. - unsigned NumWith = 0; - unsigned NumWithout = 0; - BasicBlock *PREPred = 0; - predMap.clear(); - - for (pred_iterator PI = pred_begin(CurrentBlock), - PE = pred_end(CurrentBlock); PI != PE; ++PI) { - BasicBlock *P = *PI; - // We're not interested in PRE where the block is its - // own predecessor, or in blocks with predecessors - // that are not reachable. - if (P == CurrentBlock) { - NumWithout = 2; - break; - } else if (!DT->dominates(&F.getEntryBlock(), P)) { - NumWithout = 2; - break; - } + // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from + // sinking the compare again, and it would force the code generator to + // move the i1 from processor flags or predicate registers into a general + // purpose register. + if (isa(CurInst)) + return false; - Value* predV = findLeader(P, ValNo); - if (predV == 0) { - PREPred = P; - ++NumWithout; - } else if (predV == CurInst) { - NumWithout = 2; - } else { - predMap[P] = predV; - ++NumWith; - } - } + // We don't currently value number ANY inline asm calls. + if (CallInst *CallI = dyn_cast(CurInst)) + if (CallI->isInlineAsm()) + return false; - // Don't do PRE when it might increase code size, i.e. when - // we would need to insert instructions in more than one pred. - if (NumWithout != 1 || NumWith == 0) - continue; - - // Don't do PRE across indirect branch. - if (isa(PREPred->getTerminator())) - continue; + uint32_t ValNo = VN.lookup(CurInst); + + // Look for the predecessors for PRE opportunities. We're + // only trying to solve the basic diamond case, where + // a value is computed in the successor and one predecessor, + // but not the other. We also explicitly disallow cases + // where the successor is its own predecessor, because they're + // more complicated to get right. 
+ unsigned NumWith = 0; + unsigned NumWithout = 0; + BasicBlock *PREPred = nullptr; + BasicBlock *CurrentBlock = CurInst->getParent(); + predMap.clear(); + + for (pred_iterator PI = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock); + PI != PE; ++PI) { + BasicBlock *P = *PI; + // We're not interested in PRE where the block is its + // own predecessor, or in blocks with predecessors + // that are not reachable. + if (P == CurrentBlock) { + NumWithout = 2; + break; + } else if (!DT->isReachableFromEntry(P)) { + NumWithout = 2; + break; + } - // We can't do PRE safely on a critical edge, so instead we schedule - // the edge to be split and perform the PRE the next time we iterate - // on the function. - unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); - if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { - toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); - continue; - } + Value *predV = findLeader(P, ValNo); + if (!predV) { + predMap.push_back(std::make_pair(static_cast(nullptr), P)); + PREPred = P; + ++NumWithout; + } else if (predV == CurInst) { + /* CurInst dominates this predecessor. */ + NumWithout = 2; + break; + } else { + predMap.push_back(std::make_pair(predV, P)); + ++NumWith; + } + } - // Instantiate the expression in the predecessor that lacked it. - // Because we are going top-down through the block, all value numbers - // will be available in the predecessor by the time we need them. Any - // that weren't originally present will have been instantiated earlier - // in this loop. - Instruction *PREInstr = CurInst->clone(); - bool success = true; - for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) { - Value *Op = PREInstr->getOperand(i); - if (isa(Op) || isa(Op) || isa(Op)) - continue; + // Don't do PRE when it might increase code size, i.e. when + // we would need to insert instructions in more than one pred. + if (NumWithout > 1 || NumWith == 0) + return false; - if (Value *V = findLeader(PREPred, VN.lookup(Op))) { - PREInstr->setOperand(i, V); - } else { - success = false; - break; - } - } + // We may have a case where all predecessors have the instruction, + // and we just need to insert a phi node. Otherwise, perform + // insertion. + Instruction *PREInstr = nullptr; - // Fail out if we encounter an operand that is not available in - // the PRE predecessor. This is typically because of loads which - // are not value numbered precisely. - if (!success) { - delete PREInstr; - DEBUG(verifyRemoved(PREInstr)); - continue; - } + if (NumWithout != 0) { + // Don't do PRE across indirect branch. + if (isa(PREPred->getTerminator())) + return false; - PREInstr->insertBefore(PREPred->getTerminator()); - PREInstr->setName(CurInst->getName() + ".pre"); - PREInstr->setDebugLoc(CurInst->getDebugLoc()); - predMap[PREPred] = PREInstr; - VN.add(PREInstr, ValNo); - ++NumGVNPRE; - - // Update the availability map to include the new instruction. - addToLeaderTable(ValNo, PREInstr, PREPred); - - // Create a PHI to make the value available in this block. - pred_iterator PB = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock); - PHINode* Phi = PHINode::Create(CurInst->getType(), std::distance(PB, PE), - CurInst->getName() + ".pre-phi", - CurrentBlock->begin()); - for (pred_iterator PI = PB; PI != PE; ++PI) { - BasicBlock *P = *PI; - Phi->addIncoming(predMap[P], P); - } + // We can't do PRE safely on a critical edge, so instead we schedule + // the edge to be split and perform the PRE the next time we iterate + // on the function. 
+ unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); + if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { + toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); + return false; + } + // We need to insert somewhere, so let's give it a shot + PREInstr = CurInst->clone(); + if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) { + // If we failed insertion, make sure we remove the instruction. + DEBUG(verifyRemoved(PREInstr)); + delete PREInstr; + return false; + } + } - VN.add(Phi, ValNo); - addToLeaderTable(ValNo, Phi, CurrentBlock); - Phi->setDebugLoc(CurInst->getDebugLoc()); - CurInst->replaceAllUsesWith(Phi); - if (Phi->getType()->isPointerTy()) { - // Because we have added a PHI-use of the pointer value, it has now - // "escaped" from alias analysis' perspective. We need to inform - // AA of this. - for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; - ++ii) { - unsigned jj = PHINode::getOperandNumForIncomingValue(ii); - VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj)); - } - - if (MD) - MD->invalidateCachedPointerInfo(Phi); - } - VN.erase(CurInst); - removeFromLeaderTable(ValNo, CurInst, CurrentBlock); + // Either we should have filled in the PRE instruction, or we should + // not have needed insertions. + assert (PREInstr != nullptr || NumWithout == 0); - DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); - if (MD) MD->removeInstruction(CurInst); - CurInst->eraseFromParent(); - DEBUG(verifyRemoved(CurInst)); - Changed = true; + ++NumGVNPRE; + + // Create a PHI to make the value available in this block. + PHINode *Phi = + PHINode::Create(CurInst->getType(), predMap.size(), + CurInst->getName() + ".pre-phi", CurrentBlock->begin()); + for (unsigned i = 0, e = predMap.size(); i != e; ++i) { + if (Value *V = predMap[i].first) + Phi->addIncoming(V, predMap[i].second); + else + Phi->addIncoming(PREInstr, PREPred); + } + + VN.add(Phi, ValNo); + addToLeaderTable(ValNo, Phi, CurrentBlock); + Phi->setDebugLoc(CurInst->getDebugLoc()); + CurInst->replaceAllUsesWith(Phi); + if (Phi->getType()->getScalarType()->isPointerTy()) { + // Because we have added a PHI-use of the pointer value, it has now + // "escaped" from alias analysis' perspective. We need to inform + // AA of this. + for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii) { + unsigned jj = PHINode::getOperandNumForIncomingValue(ii); + VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj)); + } + + if (MD) + MD->invalidateCachedPointerInfo(Phi); + } + VN.erase(CurInst); + removeFromLeaderTable(ValNo, CurInst, CurrentBlock); + + DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); + if (MD) + MD->removeInstruction(CurInst); + DEBUG(verifyRemoved(CurInst)); + CurInst->eraseFromParent(); + ++NumGVNInstr; + + return true; +} + +/// Perform a purely local form of PRE that looks for diamond +/// control flow patterns and attempts to perform simple PRE at the join point. +bool GVN::performPRE(Function &F) { + bool Changed = false; + for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { + // Nothing to PRE in the entry block. + if (CurrentBlock == &F.getEntryBlock()) + continue; + + // Don't perform PRE on a landing pad. 
+ if (CurrentBlock->isLandingPad()) + continue; + + for (BasicBlock::iterator BI = CurrentBlock->begin(), + BE = CurrentBlock->end(); + BI != BE;) { + Instruction *CurInst = BI++; + Changed = performScalarPRE(CurInst); } } @@ -2503,36 +2638,51 @@ bool GVN::performPRE(Function &F) { return Changed; } -/// splitCriticalEdges - Split critical edges found during the previous +/// Split the critical edge connecting the given two blocks, and return +/// the block inserted to the critical edge. +BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { + BasicBlock *BB = SplitCriticalEdge( + Pred, Succ, CriticalEdgeSplittingOptions(getAliasAnalysis(), DT)); + if (MD) + MD->invalidateCachedPredecessors(); + return BB; +} + +/// Split critical edges found during the previous /// iteration that may enable further optimization. bool GVN::splitCriticalEdges() { if (toSplit.empty()) return false; do { std::pair Edge = toSplit.pop_back_val(); - SplitCriticalEdge(Edge.first, Edge.second, this); + SplitCriticalEdge(Edge.first, Edge.second, + CriticalEdgeSplittingOptions(getAliasAnalysis(), DT)); } while (!toSplit.empty()); if (MD) MD->invalidateCachedPredecessors(); return true; } -/// iterateOnFunction - Executes one iteration of GVN +/// Executes one iteration of GVN bool GVN::iterateOnFunction(Function &F) { cleanupGlobalSets(); - + // Top-down walk of the dominator tree bool Changed = false; -#if 0 + // Save the blocks this function have before transformation begins. GVN may + // split critical edge, and hence may invalidate the RPO/DT iterator. + // + std::vector BBVect; + BBVect.reserve(256); // Needed for value numbering with phi construction to work. - ReversePostOrderTraversal RPOT(&F); - for (ReversePostOrderTraversal::rpo_iterator RI = RPOT.begin(), - RE = RPOT.end(); RI != RE; ++RI) - Changed |= processBlock(*RI); -#else - for (df_iterator DI = df_begin(DT->getRootNode()), - DE = df_end(DT->getRootNode()); DI != DE; ++DI) - Changed |= processBlock(DI->getBlock()); -#endif + ReversePostOrderTraversal RPOT(&F); + for (ReversePostOrderTraversal::rpo_iterator RI = RPOT.begin(), + RE = RPOT.end(); + RI != RE; ++RI) + BBVect.push_back(*RI); + + for (std::vector::iterator I = BBVect.begin(), E = BBVect.end(); + I != E; I++) + Changed |= processBlock(*I); return Changed; } @@ -2543,7 +2693,7 @@ void GVN::cleanupGlobalSets() { TableAllocator.Reset(); } -/// verifyRemoved - Verify that the specified instruction does not occur in our +/// Verify that the specified instruction does not occur in our /// internal data structures. void GVN::verifyRemoved(const Instruction *Inst) const { VN.verifyRemoved(Inst); @@ -2554,10 +2704,139 @@ void GVN::verifyRemoved(const Instruction *Inst) const { I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) { const LeaderTableEntry *Node = &I->second; assert(Node->Val != Inst && "Inst still in value numbering scope!"); - + while (Node->Next) { Node = Node->Next; assert(Node->Val != Inst && "Inst still in value numbering scope!"); } } } + +/// BB is declared dead, which implied other blocks become dead as well. This +/// function is to add all these blocks to "DeadBlocks". For the dead blocks' +/// live successors, update their phi nodes by replacing the operands +/// corresponding to dead blocks with UndefVal. 
+void GVN::addDeadBlock(BasicBlock *BB) {
+  SmallVector<BasicBlock *, 4> NewDead;
+  SmallSetVector<BasicBlock *, 4> DF;
+
+  NewDead.push_back(BB);
+  while (!NewDead.empty()) {
+    BasicBlock *D = NewDead.pop_back_val();
+    if (DeadBlocks.count(D))
+      continue;
+
+    // All blocks dominated by D are dead.
+    SmallVector<BasicBlock *, 8> Dom;
+    DT->getDescendants(D, Dom);
+    DeadBlocks.insert(Dom.begin(), Dom.end());
+
+    // Figure out the dominance-frontier(D).
+    for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
+           E = Dom.end(); I != E; I++) {
+      BasicBlock *B = *I;
+      for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) {
+        BasicBlock *S = *SI;
+        if (DeadBlocks.count(S))
+          continue;
+
+        bool AllPredDead = true;
+        for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++)
+          if (!DeadBlocks.count(*PI)) {
+            AllPredDead = false;
+            break;
+          }
+
+        if (!AllPredDead) {
+          // S could be proved dead later on. That is why we don't update phi
+          // operands at this moment.
+          DF.insert(S);
+        } else {
+          // While S is not dominated by D, it is dead by now. This could take
+          // place if S already had a dead predecessor before D was declared
+          // dead.
+          NewDead.push_back(S);
+        }
+      }
+    }
+  }
+
+  // For the dead blocks' live successors, update their phi nodes by replacing
+  // the operands corresponding to dead blocks with UndefVal.
+  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
+        I != E; I++) {
+    BasicBlock *B = *I;
+    if (DeadBlocks.count(B))
+      continue;
+
+    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
+    for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(),
+           PE = Preds.end(); PI != PE; PI++) {
+      BasicBlock *P = *PI;
+
+      if (!DeadBlocks.count(P))
+        continue;
+
+      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
+        if (BasicBlock *S = splitCriticalEdges(P, B))
+          DeadBlocks.insert(P = S);
+      }
+
+      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
+        PHINode &Phi = cast<PHINode>(*II);
+        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
+                             UndefValue::get(Phi.getType()));
+      }
+    }
+  }
+}
+
+// If the given branch is recognized as a foldable branch (i.e. a conditional
+// branch with a constant condition), it will perform the following analyses
+// and transformation.
+//  1) If the dead outgoing edge is a critical edge, split it. Let
+//     R be the target of the dead outgoing edge.
+//  2) Identify the set of dead blocks implied by the branch's dead outgoing
+//     edge. The result of this step will be {X | X is dominated by R}.
+//  3) Identify those blocks which have at least one dead predecessor. The
+//     result of this step will be dominance-frontier(R).
+//  4) Update the PHIs in DF(R) by replacing the operands corresponding to
+//     dead blocks with "UndefVal" in the hope that these PHIs will be
+//     optimized away.
+//
+// Return true iff *NEW* dead code is found.
+bool GVN::processFoldableCondBr(BranchInst *BI) {
+  if (!BI || BI->isUnconditional())
+    return false;
+
+  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
+  if (!Cond)
+    return false;
+
+  BasicBlock *DeadRoot = Cond->getZExtValue() ?
+                         BI->getSuccessor(1) : BI->getSuccessor(0);
+  if (DeadBlocks.count(DeadRoot))
+    return false;
+
+  if (!DeadRoot->getSinglePredecessor())
+    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
+
+  addDeadBlock(DeadRoot);
+  return true;
+}
+
+// performPRE() will trigger an assert if it comes across an instruction
+// without an associated val-num. As it normally has far more live instructions
+// than dead instructions, it makes more sense just to "fabricate" a val-number
+// for the dead code than to check whether the instruction involved is dead.
+void GVN::assignValNumForDeadCode() {
+  for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
+        E = DeadBlocks.end(); I != E; I++) {
+    BasicBlock *BB = *I;
+    for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
+          II != EE; II++) {
+      Instruction *Inst = &*II;
+      unsigned ValNum = VN.lookup_or_add(Inst);
+      addToLeaderTable(ValNum, Inst, BB);
+    }
+  }
+}
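
For orientation, a minimal usage sketch (not part of the patch above) follows: it shows how a client of roughly this LLVM vintage might schedule the GVN pass implemented in this file through the legacy pass manager. The helper name runGVNOnModule is hypothetical, and the snippet assumes the embedding tool has already built a valid Module and initialized the relevant pass registry entries.

    // Sketch only: assumes ~3.7-era headers and an initialized pass registry.
    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Scalar.h"

    static bool runGVNOnModule(llvm::Module &M) { // hypothetical helper
      llvm::legacy::PassManager PM;
      // createGVNPass(NoLoads = false) constructs the pass defined in GVN.cpp;
      // its required analyses (dominator tree, memory dependence, TLI, ...)
      // are scheduled automatically from the pass's getAnalysisUsage().
      PM.add(llvm::createGVNPass(/*NoLoads=*/false));
      return PM.run(M); // true iff the IR was modified
    }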