X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=include%2Fllvm%2FTransforms%2FUtils%2FLocal.h;h=2354e0ae160bd171a13cf1638241f26e0f4e2f0b;hb=8a43b3fad98540d6a25e85eccee11ae2d82733f3;hp=c9d29168217aa63c7150185f334916eb4c7fd0db;hpb=5649ba70fb39f2fda4791d255ae8bb373071874f;p=oota-llvm.git

diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index c9d29168217..2354e0ae160 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -15,6 +15,13 @@
 #ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
 #define LLVM_TRANSFORMS_UTILS_LOCAL_H
 
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Operator.h"
+
 namespace llvm {
 
 class User;
@@ -26,15 +33,18 @@ class DbgDeclareInst;
 class StoreInst;
 class LoadInst;
 class Value;
-class Pass;
 class PHINode;
 class AllocaInst;
+class AssumptionCache;
 class ConstantExpr;
-class TargetData;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetTransformInfo;
 class DIBuilder;
+class DominatorTree;
 
 template<typename T> class SmallVectorImpl;
-  
+
 //===----------------------------------------------------------------------===//
 //  Local constant propagation.
 //
@@ -46,7 +56,8 @@ template<typename T> class SmallVectorImpl;
 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
 /// conditions and indirectbr addresses this might make dead if
 /// DeleteDeadConditions is true.
-bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+                            const TargetLibraryInfo *TLI = nullptr);
 
 //===----------------------------------------------------------------------===//
 //  Local dead code elimination.
 //
@@ -55,29 +66,32 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
 /// isInstructionTriviallyDead - Return true if the result produced by the
 /// instruction is not used, and the instruction has no side effects.
 ///
-bool isInstructionTriviallyDead(Instruction *I);
+bool isInstructionTriviallyDead(Instruction *I,
+                                const TargetLibraryInfo *TLI = nullptr);
 
 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
 /// trivially dead instruction, delete it. If that makes any of its operands
 /// trivially dead, delete them too, recursively. Return true if any
 /// instructions were deleted.
-bool RecursivelyDeleteTriviallyDeadInstructions(Value *V);
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+                                        const TargetLibraryInfo *TLI = nullptr);
 
 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
 /// dead PHI node, due to being a def-use chain of single-use nodes that
 /// either forms a cycle or is terminated by a trivially dead instruction,
 /// delete it. If that makes any of its operands trivially dead, delete them
 /// too, recursively. Return true if a change was made.
-bool RecursivelyDeleteDeadPHINode(PHINode *PN);
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+                                  const TargetLibraryInfo *TLI = nullptr);
 
-
 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
 /// simplify any instructions in it and recursively delete dead instructions.
 ///
 /// This returns true if it changed the code, note that it can delete
 /// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
-
+bool SimplifyInstructionsInBlock(BasicBlock *BB,
+                                 const TargetLibraryInfo *TLI = nullptr);
+
 //===----------------------------------------------------------------------===//
 //  Control Flow Graph Restructuring.
 //
@@ -93,17 +107,14 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
 ///
 /// .. and delete the predecessor corresponding to the '1', this will attempt to
 /// recursively fold the 'and' to 0.
-void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
-                                  TargetData *TD = 0);
-
-
+void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
+
 /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
 /// predecessor is known to have one successor (BB!). Eliminate the edge
 /// between them, moving the instructions in the predecessor into BB. This
 /// deletes the predecessor block.
 ///
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = 0);
-
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
 
 /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
 /// unconditional branch, and contains no instructions other than PHI nodes,
@@ -125,13 +136,20 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
 /// of the CFG. It returns true if a modification was made, possibly deleting
 /// the basic block that was pointed to.
 ///
-bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+                 unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);
+
+/// FlattenCFG - This function is used to flatten a CFG. For
+/// example, it uses parallel-and and parallel-or mode to collapse
+/// if-conditions and merge if-regions with identical statements.
+///
+bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
 
 /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
 /// and if a predecessor branches to us and one of our successors, fold the
 /// setcc into the predecessor and use logical operations to pick the right
 /// destination.
-bool FoldBranchToCommonDest(BranchInst *BI);
+bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
 
 /// DemoteRegToStack - This function takes a virtual register computed by an
 /// Instruction and replaces it with a slot in the stack frame, allocated via
@@ -141,35 +159,106 @@ bool FoldBranchToCommonDest(BranchInst *BI);
 ///
 AllocaInst *DemoteRegToStack(Instruction &X, bool VolatileLoads = false,
-                             Instruction *AllocaPoint = 0);
+                             Instruction *AllocaPoint = nullptr);
 
 /// DemotePHIToStack - This function takes a virtual register computed by a phi
 /// node and replaces it with a slot in the stack frame, allocated via alloca.
-/// The phi node is deleted and it returns the pointer to the alloca inserted. 
-AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
+/// The phi node is deleted and it returns the pointer to the alloca inserted.
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
 
 /// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
 /// and it is more than the alignment of the ultimate object, see if we can
 /// increase the alignment of the ultimate object, making this check succeed.
 unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
-                                    const TargetData *TD = 0);
+                                    const DataLayout &DL,
+                                    const Instruction *CxtI = nullptr,
+                                    AssumptionCache *AC = nullptr,
+                                    const DominatorTree *DT = nullptr);
 
 /// getKnownAlignment - Try to infer an alignment for the specified pointer.
-static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
-  return getOrEnforceKnownAlignment(V, 0, TD);
+static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
+                                         const Instruction *CxtI = nullptr,
+                                         AssumptionCache *AC = nullptr,
+                                         const DominatorTree *DT = nullptr) {
+  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
+}
+
+/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
+/// code necessary to compute the offset from the base pointer (without adding
+/// in the base pointer). Return the result as a signed integer of intptr size.
+/// When NoAssumptions is true, no assumptions about index computation not
+/// overflowing are made.
+template <typename IRBuilderTy>
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
+                     bool NoAssumptions = false) {
+  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
+  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
+  Value *Result = Constant::getNullValue(IntPtrTy);
+
+  // If the GEP is inbounds, we know that none of the addressing operations will
+  // overflow in an unsigned sense.
+  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
+
+  // Build a mask for high order bits.
+  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
+  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
+
+  gep_type_iterator GTI = gep_type_begin(GEP);
+  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+       ++i, ++GTI) {
+    Value *Op = *i;
+    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+    if (Constant *OpC = dyn_cast<Constant>(Op)) {
+      if (OpC->isZeroValue())
+        continue;
+
+      // Handle a struct index, which adds its field offset to the pointer.
+      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+        if (OpC->getType()->isVectorTy())
+          OpC = OpC->getSplatValue();
+
+        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
+        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
+
+        if (Size)
+          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+                                      GEP->getName()+".offs");
+        continue;
+      }
+
+      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+      // Emit an add instruction.
+      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+      continue;
+    }
+    // Convert to correct type.
+    if (Op->getType() != IntPtrTy)
+      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+    if (Size != 1) {
+      // We'll let instcombine(mul) convert this to a shl if possible.
+      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+                              GEP->getName()+".idx", isInBounds /*NUW*/);
+    }
+
+    // Emit an add instruction.
+    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+  }
+  return Result;
 }
 
 ///===---------------------------------------------------------------------===//
 /// Dbg Intrinsic utilities
 ///
-/// Inserts a llvm.dbg.value instrinsic before the stores to an alloca'd value
+/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
 /// that has an associated llvm.dbg.decl intrinsic.
 bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                      StoreInst *SI, DIBuilder &Builder);
 
-/// Inserts a llvm.dbg.value instrinsic before the stores to an alloca'd value
+/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
 /// that has an associated llvm.dbg.decl intrinsic.
 bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                      LoadInst *LI, DIBuilder &Builder);
@@ -178,6 +267,42 @@ bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
 /// of llvm.dbg.value intrinsics.
 bool LowerDbgDeclare(Function &F);
 
+/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
+/// an alloca, if any.
+DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
+
+/// \brief Replaces llvm.dbg.declare instruction when an alloca is replaced with
+/// a new value. If Deref is true, an additional DW_OP_deref is prepended to
+/// the expression.
+bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+                                DIBuilder &Builder, bool Deref);
+
+/// Replace 'BB's terminator with one that does not have an unwind successor
+/// block. Rewrites `invoke` to `call`, `catchendpad unwind label %foo` to
+/// `catchendpad unwind to caller`, etc. Updates any PHIs in unwind successor.
+///
+/// \param BB Block whose terminator will be replaced. Its terminator must
+///           have an unwind successor.
+void removeUnwindEdge(BasicBlock *BB);
+
+/// \brief Remove all blocks that can not be reached from the function's entry.
+///
+/// Returns true if any basic block was removed.
+bool removeUnreachableBlocks(Function &F);
+
+/// \brief Combine the metadata of two instructions so that K can replace J
+///
+/// Metadata not listed as known via KnownIDs is removed
+void combineMetadata(Instruction *K, const Instruction *J,
+                     ArrayRef<unsigned> KnownIDs);
+
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given edge. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+                                  const BasicBlockEdge &Edge);
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given BasicBlock. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+                                  const BasicBlock *BB);
 } // End llvm namespace
 
 #endif
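The patch threads a TargetLibraryInfo pointer through the dead-code helpers and adds removeUnreachableBlocks. The sketch below shows one plausible way a transform could drive these utilities after rewriting IR; the wrapper name cleanupFunction is illustrative and is not part of this header.

// Hypothetical driver (not part of the patch): run the local cleanup
// utilities declared in Local.h over a function.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

static bool cleanupFunction(Function &F, const TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (BasicBlock &BB : F) {
    // Fold terminators with constant conditions; passing TLI also lets the
    // helpers delete calls to known library functions that become dead.
    Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true, TLI);
    // Simplify remaining instructions and recursively delete trivially dead ones.
    Changed |= SimplifyInstructionsInBlock(&BB, TLI);
  }
  // Folding terminators can leave blocks without predecessors; remove them.
  Changed |= removeUnreachableBlocks(F);
  return Changed;
}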
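EmitGEPOffset above turns a getelementptr into explicit offset arithmetic. As a rough usage sketch (the wrapper materializeGEPOffset is hypothetical, and the DataLayout is assumed to come from the enclosing module), the byte offset of a GEP instruction could be materialized right before it like this:

// Illustrative only: emit the byte offset computed by a GEP as new
// instructions inserted immediately before the GEP itself.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

static Value *materializeGEPOffset(GetElementPtrInst *GEP,
                                   const DataLayout &DL) {
  IRBuilder<> Builder(GEP); // new instructions are inserted before the GEP
  // Returns an intptr-sized signed offset; pass NoAssumptions = true to
  // drop the inbounds-based no-overflow flags.
  return EmitGEPOffset(&Builder, DL, GEP);
}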