X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FInstCombine%2FInstructionCombining.cpp;h=3ae7f08238b20f9137d44748cbe6953676b374fc;hp=a3a29b4e0bd7854eb18edf6e8039b829cea07f51;hb=f79c5ce90bae0f60ffa44491a6eccf5af18deb60;hpb=5307076e20b2bbe6ef61cf7717c37b7e3fe27cdc

diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index a3a29b4e0bd..3ae7f08238b 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -33,16 +33,21 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "llvm/Transforms/Scalar.h"
-#include "InstCombine.h"
+#include "llvm/Transforms/InstCombine/InstCombine.h"
+#include "InstCombineInternal.h"
 #include "llvm-c/Initialization.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringSwitch.h"
-#include "llvm/Analysis/AssumptionTracker.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LibCallSemantics.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/DataLayout.h"
@@ -53,7 +58,8 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include 
 #include 
@@ -70,56 +76,18 @@ STATISTIC(NumExpand, "Number of expansions");
 STATISTIC(NumFactor , "Number of factorizations");
 STATISTIC(NumReassoc , "Number of reassociations");
 
-static cl::opt<bool>
-    EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
-                         cl::init(false),
-                         cl::desc("Enable unsafe double to float "
-                                  "shrinking for math lib calls"));
-
-// Initialization Routines
-void llvm::initializeInstCombine(PassRegistry &Registry) {
-  initializeInstCombinerPass(Registry);
-}
-
-void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
-  initializeInstCombine(*unwrap(R));
-}
-
-char InstCombiner::ID = 0;
-INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
-                "Combine redundant instructions", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(InstCombiner, "instcombine",
-                "Combine redundant instructions", false, false)
-
-void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
-  AU.setPreservesCFG();
-  AU.addRequired<AssumptionTracker>();
-  AU.addRequired<TargetLibraryInfo>();
-  AU.addRequired<DominatorTreeWrapperPass>();
-  AU.addPreserved<DominatorTreeWrapperPass>();
-}
-
-
 Value *InstCombiner::EmitGEPOffset(User *GEP) {
-  return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
+  return llvm::EmitGEPOffset(Builder, DL, GEP);
 }
 
-/// ShouldChangeType - Return true if it is desirable to convert a computation
-/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
-/// type for example, or from a smaller to a larger illegal type.
-bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
-  assert(From->isIntegerTy() && To->isIntegerTy());
-
-  // If we don't have DL, we don't know if the source/dest are legal.
- if (!DL) return false; - - unsigned FromWidth = From->getPrimitiveSizeInBits(); - unsigned ToWidth = To->getPrimitiveSizeInBits(); - bool FromLegal = DL->isLegalInteger(FromWidth); - bool ToLegal = DL->isLegalInteger(ToWidth); +/// Return true if it is desirable to convert an integer computation from a +/// given bit width to a new bit width. +/// We don't want to convert from a legal to an illegal type for example or from +/// a smaller to a larger illegal type. +bool InstCombiner::ShouldChangeType(unsigned FromWidth, + unsigned ToWidth) const { + bool FromLegal = DL.isLegalInteger(FromWidth); + bool ToLegal = DL.isLegalInteger(ToWidth); // If this is a legal integer from type, and the result would be an illegal // type, don't do the transformation. @@ -134,6 +102,17 @@ bool InstCombiner::ShouldChangeType(Type *From, Type *To) const { return true; } +/// Return true if it is desirable to convert a computation from 'From' to 'To'. +/// We don't want to convert from a legal to an illegal type for example or from +/// a smaller to a larger illegal type. +bool InstCombiner::ShouldChangeType(Type *From, Type *To) const { + assert(From->isIntegerTy() && To->isIntegerTy()); + + unsigned FromWidth = From->getPrimitiveSizeInBits(); + unsigned ToWidth = To->getPrimitiveSizeInBits(); + return ShouldChangeType(FromWidth, ToWidth); +} + // Return true, if No Signed Wrap should be maintained for I. // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C", // where both B and C should be ConstantInts, results in a constant that does @@ -187,27 +166,26 @@ static void ClearSubclassDataAfterReassociation(BinaryOperator &I) { I.setFastMathFlags(FMF); } -/// SimplifyAssociativeOrCommutative - This performs a few simplifications for -/// operators which are associative or commutative: -// -// Commutative operators: -// -// 1. Order operands such that they are listed from right (least complex) to -// left (most complex). This puts constants before unary operators before -// binary operators. -// -// Associative operators: -// -// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. -// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. -// -// Associative and commutative operators: -// -// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. -// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. -// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" -// if C1 and C2 are constants. -// +/// This performs a few simplifications for operators that are associative or +/// commutative: +/// +/// Commutative operators: +/// +/// 1. Order operands such that they are listed from right (least complex) to +/// left (most complex). This puts constants before unary operators before +/// binary operators. +/// +/// Associative operators: +/// +/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. +/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. +/// +/// Associative and commutative operators: +/// +/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. +/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. +/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" +/// if C1 and C2 are constants. 
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) { Instruction::BinaryOps Opcode = I.getOpcode(); bool Changed = false; @@ -353,7 +331,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) { } while (1); } -/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to +/// Return whether "X LOp (Y ROp Z)" is always equal to /// "(X LOp Y) ROp (X LOp Z)". static bool LeftDistributesOverRight(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp) { @@ -392,7 +370,7 @@ static bool LeftDistributesOverRight(Instruction::BinaryOps LOp, } } -/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to +/// Return whether "(X LOp Y) ROp Z" is always equal to /// "(X ROp Z) LOp (Y ROp Z)". static bool RightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp) { @@ -474,7 +452,7 @@ getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode, /// This tries to simplify binary operations by factorizing out common terms /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)"). static Value *tryFactorization(InstCombiner::BuilderTy *Builder, - const DataLayout *DL, BinaryOperator &I, + const DataLayout &DL, BinaryOperator &I, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D) { @@ -483,6 +461,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder, if (!A || !C || !B || !D) return nullptr; + Value *V = nullptr; Value *SimplifiedInst = nullptr; Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); @@ -499,7 +478,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder, std::swap(C, D); // Consider forming "A op' (B op D)". // If "B op D" simplifies then it can be formed with no cost. - Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL); + V = SimplifyBinOp(TopLevelOpcode, B, D, DL); // If "B op D" doesn't simplify then only go on if both of the existing // operations "A op' B" and "C op' D" will be zapped as no longer used. if (!V && LHS->hasOneUse() && RHS->hasOneUse()) @@ -518,7 +497,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder, std::swap(C, D); // Consider forming "(A op C) op' B". // If "A op C" simplifies then it can be formed with no cost. - Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL); + V = SimplifyBinOp(TopLevelOpcode, A, C, DL); // If "A op C" doesn't simplify then only go on if both of the existing // operations "A op' B" and "C op' D" will be zapped as no longer used. @@ -548,18 +527,30 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder, if (BinaryOperator *Op1 = dyn_cast(RHS)) if (isa(Op1)) HasNSW &= Op1->hasNoSignedWrap(); - BO->setHasNoSignedWrap(HasNSW); + + // We can propagate 'nsw' if we know that + // %Y = mul nsw i16 %X, C + // %Z = add nsw i16 %Y, %X + // => + // %Z = mul nsw i16 %X, C+1 + // + // iff C+1 isn't INT_MIN + const APInt *CInt; + if (TopLevelOpcode == Instruction::Add && + InnerOpcode == Instruction::Mul) + if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue()) + BO->setHasNoSignedWrap(HasNSW); } } } return SimplifiedInst; } -/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations -/// which some other binary operation distributes over either by factorizing -/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this -/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is -/// a win). Returns the simplified value, or null if it didn't simplify. 
+/// This tries to simplify binary operations which some other binary operation +/// distributes over either by factorizing out common terms +/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in +/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). +/// Returns the simplified value, or null if it didn't simplify. Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) { Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); BinaryOperator *Op0 = dyn_cast(LHS); @@ -641,12 +632,38 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) { } } + // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0)) + // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d))) + if (auto *SI0 = dyn_cast(LHS)) { + if (auto *SI1 = dyn_cast(RHS)) { + if (SI0->getCondition() == SI1->getCondition()) { + Value *SI = nullptr; + if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(), + SI1->getFalseValue(), DL, TLI, DT, AC)) + SI = Builder->CreateSelect(SI0->getCondition(), + Builder->CreateBinOp(TopLevelOpcode, + SI0->getTrueValue(), + SI1->getTrueValue()), + V); + if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(), + SI1->getTrueValue(), DL, TLI, DT, AC)) + SI = Builder->CreateSelect( + SI0->getCondition(), V, + Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(), + SI1->getFalseValue())); + if (SI) { + SI->takeName(&I); + return SI; + } + } + } + } + return nullptr; } -// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction -// if the LHS is a constant zero (which is the 'negate' form). -// +/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a +/// constant zero (which is the 'negate' form). Value *InstCombiner::dyn_castNegVal(Value *V) const { if (BinaryOperator::isNeg(V)) return BinaryOperator::getNegArgument(V); @@ -662,10 +679,8 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const { return nullptr; } -// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the -// instruction if the LHS is a constant negative zero (which is the 'negate' -// form). -// +/// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is +/// a constant negative zero (which is the 'negate' form). Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const { if (BinaryOperator::isFNeg(V, IgnoreZeroSign)) return BinaryOperator::getFNegArgument(V); @@ -718,10 +733,10 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, llvm_unreachable("Unknown binary instruction type!"); } -// FoldOpIntoSelect - Given an instruction with a select as one operand and a -// constant as the other operand, try to fold the binary operator into the -// select arguments. This also works for Cast instructions, which obviously do -// not have a second operand. +/// Given an instruction with a select as one operand and a constant as the +/// other operand, try to fold the binary operator into the select arguments. +/// This also works for Cast instructions, which obviously do not have a second +/// operand. Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) { // Don't modify shared select instructions if (!SI->hasOneUse()) return nullptr; @@ -745,6 +760,22 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) { return nullptr; } + // Test if a CmpInst instruction is used exclusively by a select as + // part of a minimum or maximum operation. 
If so, refrain from doing + // any other folding. This helps out other analyses which understand + // non-obfuscated minimum and maximum idioms, such as ScalarEvolution + // and CodeGen. And in this case, at least one of the comparison + // operands has at least one user besides the compare (the select), + // which would often largely negate the benefit of folding anyway. + if (auto *CI = dyn_cast(SI->getCondition())) { + if (CI->hasOneUse()) { + Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1); + if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) || + (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1)) + return nullptr; + } + } + Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this); Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this); @@ -754,11 +785,9 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) { return nullptr; } - -/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which -/// has a PHI node as operand #0, see if we can fold the instruction into the -/// PHI (which is only possible if all operands to the PHI are constants). -/// +/// Given a binary operator, cast instruction, or select which has a PHI node as +/// operand #0, see if we can fold the instruction into the PHI (which is only +/// possible if all operands to the PHI are constants). Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { PHINode *PN = cast(I.getOperand(0)); unsigned NumPHIValues = PN->getNumIncomingValues(); @@ -803,13 +832,13 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { // If the incoming non-constant value is in I's block, we will remove one // instruction, but insert another equivalent one, leading to infinite // instcombine. - if (NonConstBB == I.getParent()) + if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI)) return nullptr; } // If there is exactly one non-constant value, we can insert a copy of the // operation in that block. However, if this is a critical edge, we would be - // inserting the computation one some other paths (e.g. inside a loop). Only + // inserting the computation on some other paths (e.g. inside a loop). Only // do this if the pred block is unconditionally branching into the phi block. if (NonConstBB != nullptr) { BranchInst *BI = dyn_cast(NonConstBB->getTerminator()); @@ -822,7 +851,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { NewPN->takeName(PN); // If we are going to have to insert a new computation, do so right before the - // predecessors terminator. + // predecessor's terminator. if (NonConstBB) Builder->SetInsertPoint(NonConstBB->getTerminator()); @@ -896,27 +925,22 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { return ReplaceInstUsesWith(I, NewPN); } -/// FindElementAtOffset - Given a pointer type and a constant offset, determine -/// whether or not there is a sequence of GEP indices into the pointed type that -/// will land us at the specified offset. If so, fill them into NewIndices and -/// return the resultant element type, otherwise return null. -Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset, - SmallVectorImpl &NewIndices) { - assert(PtrTy->isPtrOrPtrVectorTy()); - - if (!DL) - return nullptr; - - Type *Ty = PtrTy->getPointerElementType(); +/// Given a pointer type and a constant offset, determine whether or not there +/// is a sequence of GEP indices into the pointed type that will land us at the +/// specified offset. 
If so, fill them into NewIndices and return the resultant +/// element type, otherwise return null. +Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset, + SmallVectorImpl &NewIndices) { + Type *Ty = PtrTy->getElementType(); if (!Ty->isSized()) return nullptr; // Start with the index over the outer type. Note that the type size // might be zero (even if the offset isn't zero) if the indexed type // is something like [0 x {int, int}] - Type *IntPtrTy = DL->getIntPtrType(PtrTy); + Type *IntPtrTy = DL.getIntPtrType(PtrTy); int64_t FirstIdx = 0; - if (int64_t TySize = DL->getTypeAllocSize(Ty)) { + if (int64_t TySize = DL.getTypeAllocSize(Ty)) { FirstIdx = Offset/TySize; Offset -= FirstIdx*TySize; @@ -934,11 +958,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset, // Index into the types. If we fail, set OrigBase to null. while (Offset) { // Indexing into tail padding between struct/array elements. - if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty)) + if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty)) return nullptr; if (StructType *STy = dyn_cast(Ty)) { - const StructLayout *SL = DL->getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); assert(Offset < (int64_t)SL->getSizeInBytes() && "Offset must stay within the indexed type"); @@ -949,7 +973,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset, Offset -= SL->getElementOffset(Elt); Ty = STy->getElementType(Elt); } else if (ArrayType *AT = dyn_cast(Ty)) { - uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType()); + uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType()); assert(EltSize && "Cannot index into a zero-sized array"); NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize)); Offset %= EltSize; @@ -973,8 +997,8 @@ static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) { return true; } -/// Descale - Return a value X such that Val = X * Scale, or null if none. If -/// the multiplication is known not to overflow then NoSignedWrap is set. +/// Return a value X such that Val = X * Scale, or null if none. +/// If the multiplication is known not to overflow, then NoSignedWrap is set. Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) { assert(isa(Val->getType()) && "Can only descale integers!"); assert(cast(Val->getType())->getBitWidth() == @@ -1016,11 +1040,11 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) { // 0'th operand of Val. std::pair Parent; - // RequireNoSignedWrap - Set if the transform requires a descaling at deeper - // levels that doesn't overflow. + // Set if the transform requires a descaling at deeper levels that doesn't + // overflow. bool RequireNoSignedWrap = false; - // logScale - log base 2 of the scale. Negative if not a power of 2. + // Log base 2 of the scale. Negative if not a power of 2. int32_t logScale = Scale.exactLogBase2(); for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down @@ -1243,7 +1267,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) { // It may not be safe to reorder shuffles and things like div, urem, etc. // because we may trap when executing those ops on unknown vector elements. // See PR20059. 
- if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr; + if (!isSafeToSpeculativelyExecute(&Inst)) + return nullptr; unsigned VWidth = cast(Inst.getType())->getNumElements(); Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1); @@ -1322,44 +1347,49 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) { Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { SmallVector Ops(GEP.op_begin(), GEP.op_end()); - if (Value *V = SimplifyGEPInst(Ops, DL, TLI, DT, AT)) + if (Value *V = SimplifyGEPInst(Ops, DL, TLI, DT, AC)) return ReplaceInstUsesWith(GEP, V); Value *PtrOp = GEP.getOperand(0); // Eliminate unneeded casts for indices, and replace indices which displace // by multiples of a zero size type with zero. - if (DL) { - bool MadeChange = false; - Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType()); - - gep_type_iterator GTI = gep_type_begin(GEP); - for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); - I != E; ++I, ++GTI) { - // Skip indices into struct types. - SequentialType *SeqTy = dyn_cast(*GTI); - if (!SeqTy) continue; - - // If the element type has zero size then any index over it is equivalent - // to an index of zero, so replace it with zero if it is not zero already. - if (SeqTy->getElementType()->isSized() && - DL->getTypeAllocSize(SeqTy->getElementType()) == 0) - if (!isa(*I) || !cast(*I)->isNullValue()) { - *I = Constant::getNullValue(IntPtrTy); - MadeChange = true; - } + bool MadeChange = false; + Type *IntPtrTy = + DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType()); + + gep_type_iterator GTI = gep_type_begin(GEP); + for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; + ++I, ++GTI) { + // Skip indices into struct types. + SequentialType *SeqTy = dyn_cast(*GTI); + if (!SeqTy) + continue; - Type *IndexTy = (*I)->getType(); - if (IndexTy != IntPtrTy) { - // If we are using a wider index than needed for this platform, shrink - // it to what we need. If narrower, sign-extend it to what we need. - // This explicit cast can make subsequent optimizations more obvious. - *I = Builder->CreateIntCast(*I, IntPtrTy, true); + // Index type should have the same width as IntPtr + Type *IndexTy = (*I)->getType(); + Type *NewIndexType = IndexTy->isVectorTy() ? + VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy; + + // If the element type has zero size then any index over it is equivalent + // to an index of zero, so replace it with zero if it is not zero already. + if (SeqTy->getElementType()->isSized() && + DL.getTypeAllocSize(SeqTy->getElementType()) == 0) + if (!isa(*I) || !cast(*I)->isNullValue()) { + *I = Constant::getNullValue(NewIndexType); MadeChange = true; } + + if (IndexTy != NewIndexType) { + // If we are using a wider index than needed for this platform, shrink + // it to what we need. If narrower, sign-extend it to what we need. + // This explicit cast can make subsequent optimizations more obvious. + *I = Builder->CreateIntCast(*I, NewIndexType, true); + MadeChange = true; } - if (MadeChange) return &GEP; } + if (MadeChange) + return &GEP; // Check to see if the inputs to the PHI node are getelementptr instructions. if (PHINode *PN = dyn_cast(PtrOp)) { @@ -1367,6 +1397,15 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (!Op1) return nullptr; + // Don't fold a GEP into itself through a PHI node. This can only happen + // through the back-edge of a loop. 
Folding a GEP into itself means that + // the value of the previous iteration needs to be stored in the meantime, + // thus requiring an additional register variable to be live, but not + // actually achieving anything (the GEP still needs to be executed once per + // loop iteration). + if (Op1 == &GEP) + return nullptr; + signed DI = -1; for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) { @@ -1374,6 +1413,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands()) return nullptr; + // As for Op1 above, don't try to fold a GEP into itself. + if (Op2 == &GEP) + return nullptr; + // Keep track of the type as we walk the GEP. Type *CurTy = Op1->getOperand(0)->getType()->getScalarType(); @@ -1415,30 +1458,37 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } } - GetElementPtrInst *NewGEP = cast(Op1->clone()); + // If not all GEPs are identical we'll have to create a new PHI node. + // Check that the old PHI node has only one use so that it will get + // removed. + if (DI != -1 && !PN->hasOneUse()) + return nullptr; + GetElementPtrInst *NewGEP = cast(Op1->clone()); if (DI == -1) { // All the GEPs feeding the PHI are identical. Clone one down into our // BB so that it can be merged with the current GEP. - GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(), - NewGEP); + GEP.getParent()->getInstList().insert( + GEP.getParent()->getFirstInsertionPt(), NewGEP); } else { // All the GEPs feeding the PHI differ at a single offset. Clone a GEP // into the current block so it can be merged, and create a new PHI to // set that index. - Instruction *InsertPt = Builder->GetInsertPoint(); - Builder->SetInsertPoint(PN); - PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(), - PN->getNumOperands()); - Builder->SetInsertPoint(InsertPt); + PHINode *NewPN; + { + IRBuilderBase::InsertPointGuard Guard(*Builder); + Builder->SetInsertPoint(PN); + NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(), + PN->getNumOperands()); + } for (auto &I : PN->operands()) NewPN->addIncoming(cast(I)->getOperand(DI), PN->getIncomingBlock(I)); NewGEP->setOperand(DI, NewPN); - GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(), - NewGEP); + GEP.getParent()->getInstList().insert( + GEP.getParent()->getFirstInsertionPt(), NewGEP); NewGEP->setOperand(DI, NewPN); } @@ -1489,6 +1539,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // normalized. if (SO1->getType() != GO1->getType()) return nullptr; + // Only do the combine when GO1 and SO1 are both constants. Only in + // this case, we are sure the cost after the merge is never more than + // that before the merge. + if (!isa(GO1) || !isa(SO1)) + return nullptr; Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); } @@ -1510,19 +1565,22 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } if (!Indices.empty()) - return (GEP.isInBounds() && Src->isInBounds()) ? - GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices, - GEP.getName()) : - GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName()); + return GEP.isInBounds() && Src->isInBounds() + ? 
GetElementPtrInst::CreateInBounds( + Src->getSourceElementType(), Src->getOperand(0), Indices, + GEP.getName()) + : GetElementPtrInst::Create(Src->getSourceElementType(), + Src->getOperand(0), Indices, + GEP.getName()); } - if (DL && GEP.getNumIndices() == 1) { + if (GEP.getNumIndices() == 1) { unsigned AS = GEP.getPointerAddressSpace(); if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == - DL->getPointerSizeInBits(AS)) { + DL.getPointerSizeInBits(AS)) { Type *PtrTy = GEP.getPointerOperandType(); Type *Ty = PtrTy->getPointerElementType(); - uint64_t TyAllocSize = DL->getTypeAllocSize(Ty); + uint64_t TyAllocSize = DL.getTypeAllocSize(Ty); bool Matched = false; uint64_t C; @@ -1591,8 +1649,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (CATy->getElementType() == StrippedPtrTy->getElementType()) { // -> GEP i8* X, ... SmallVector Idx(GEP.idx_begin()+1, GEP.idx_end()); - GetElementPtrInst *Res = - GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName()); + GetElementPtrInst *Res = GetElementPtrInst::Create( + StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName()); Res->setIsInBounds(GEP.isInBounds()); if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) return Res; @@ -1616,6 +1674,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // is a leading zero) we can fold the cast into this GEP. if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) { GEP.setOperand(0, StrippedPtr); + GEP.setSourceElementType(XATy); return &GEP; } // Cannot replace the base pointer directly because StrippedPtr's @@ -1628,9 +1687,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // %0 = GEP [10 x i8] addrspace(1)* X, ... // addrspacecast i8 addrspace(1)* %0 to i8* SmallVector Idx(GEP.idx_begin(), GEP.idx_end()); - Value *NewGEP = GEP.isInBounds() ? - Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) : - Builder->CreateGEP(StrippedPtr, Idx, GEP.getName()); + Value *NewGEP = GEP.isInBounds() + ? Builder->CreateInBoundsGEP( + nullptr, StrippedPtr, Idx, GEP.getName()) + : Builder->CreateGEP(nullptr, StrippedPtr, Idx, + GEP.getName()); return new AddrSpaceCastInst(NewGEP, GEP.getType()); } } @@ -1641,14 +1702,16 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast Type *SrcElTy = StrippedPtrTy->getElementType(); Type *ResElTy = PtrOp->getType()->getPointerElementType(); - if (DL && SrcElTy->isArrayTy() && - DL->getTypeAllocSize(SrcElTy->getArrayElementType()) == - DL->getTypeAllocSize(ResElTy)) { - Type *IdxType = DL->getIntPtrType(GEP.getType()); + if (SrcElTy->isArrayTy() && + DL.getTypeAllocSize(SrcElTy->getArrayElementType()) == + DL.getTypeAllocSize(ResElTy)) { + Type *IdxType = DL.getIntPtrType(GEP.getType()); Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) }; - Value *NewGEP = GEP.isInBounds() ? - Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) : - Builder->CreateGEP(StrippedPtr, Idx, GEP.getName()); + Value *NewGEP = + GEP.isInBounds() + ? 
Builder->CreateInBoundsGEP(nullptr, StrippedPtr, Idx, + GEP.getName()) + : Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName()); // V and GEP are both pointer types --> BitCast return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, @@ -1659,11 +1722,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // %V = mul i64 %N, 4 // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast - if (DL && ResElTy->isSized() && SrcElTy->isSized()) { + if (ResElTy->isSized() && SrcElTy->isSized()) { // Check that changing the type amounts to dividing the index by a scale // factor. - uint64_t ResSize = DL->getTypeAllocSize(ResElTy); - uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy); + uint64_t ResSize = DL.getTypeAllocSize(ResElTy); + uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy); if (ResSize && SrcSize % ResSize == 0) { Value *Idx = GEP.getOperand(1); unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); @@ -1671,7 +1734,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Earlier transforms ensure that the index has type IntPtrType, which // considerably simplifies the logic by eliminating implicit casts. - assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) && + assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) && "Index not cast to pointer width?"); bool NSW; @@ -1679,9 +1742,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. // If the multiplication NewIdx * Scale may overflow then the new // GEP may not be "inbounds". - Value *NewGEP = GEP.isInBounds() && NSW ? - Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) : - Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName()); + Value *NewGEP = + GEP.isInBounds() && NSW + ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx, + GEP.getName()) + : Builder->CreateGEP(nullptr, StrippedPtr, NewIdx, + GEP.getName()); // The NewGEP must be pointer typed, so must the old one -> BitCast return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, @@ -1694,13 +1760,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp // (where tmp = 8*tmp2) into: // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast - if (DL && ResElTy->isSized() && SrcElTy->isSized() && - SrcElTy->isArrayTy()) { + if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) { // Check that changing to the array element type amounts to dividing the // index by a scale factor. - uint64_t ResSize = DL->getTypeAllocSize(ResElTy); - uint64_t ArrayEltSize - = DL->getTypeAllocSize(SrcElTy->getArrayElementType()); + uint64_t ResSize = DL.getTypeAllocSize(ResElTy); + uint64_t ArrayEltSize = + DL.getTypeAllocSize(SrcElTy->getArrayElementType()); if (ResSize && ArrayEltSize % ResSize == 0) { Value *Idx = GEP.getOperand(1); unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); @@ -1708,7 +1773,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Earlier transforms ensure that the index has type IntPtrType, which // considerably simplifies the logic by eliminating implicit casts. 
- assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) && + assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) && "Index not cast to pointer width?"); bool NSW; @@ -1717,13 +1782,14 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // If the multiplication NewIdx * Scale may overflow then the new // GEP may not be "inbounds". Value *Off[2] = { - Constant::getNullValue(DL->getIntPtrType(GEP.getType())), - NewIdx - }; - - Value *NewGEP = GEP.isInBounds() && NSW ? - Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) : - Builder->CreateGEP(StrippedPtr, Off, GEP.getName()); + Constant::getNullValue(DL.getIntPtrType(GEP.getType())), + NewIdx}; + + Value *NewGEP = GEP.isInBounds() && NSW + ? Builder->CreateInBoundsGEP( + SrcElTy, StrippedPtr, Off, GEP.getName()) + : Builder->CreateGEP(SrcElTy, StrippedPtr, Off, + GEP.getName()); // The NewGEP must be pointer typed, so must the old one -> BitCast return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, GEP.getType()); @@ -1733,9 +1799,6 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } } - if (!DL) - return nullptr; - // addrspacecast between types is canonicalized as a bitcast, then an // addrspacecast. To take advantage of the below bitcast + struct GEP, look // through the addrspacecast. @@ -1756,10 +1819,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (BitCastInst *BCI = dyn_cast(PtrOp)) { Value *Operand = BCI->getOperand(0); PointerType *OpType = cast(Operand->getType()); - unsigned OffsetBits = DL->getPointerTypeSizeInBits(GEP.getType()); + unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType()); APInt Offset(OffsetBits, 0); if (!isa(Operand) && - GEP.accumulateConstantOffset(*DL, Offset)) { + GEP.accumulateConstantOffset(DL, Offset)) { // If this GEP instruction doesn't move the pointer, just replace the GEP // with a bitcast of the real input to the dest type. @@ -1771,7 +1834,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (Instruction *I = visitBitCast(*BCI)) { if (I != BCI) { I->takeName(BCI); - BCI->getParent()->getInstList().insert(BCI, I); + BCI->getParent()->getInstList().insert(BCI->getIterator(), I); ReplaceInstUsesWith(*BCI, I); } return &GEP; @@ -1788,9 +1851,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // GEP. SmallVector NewIndices; if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) { - Value *NGEP = GEP.isInBounds() ? - Builder->CreateInBoundsGEP(Operand, NewIndices) : - Builder->CreateGEP(Operand, NewIndices); + Value *NGEP = + GEP.isInBounds() + ? Builder->CreateInBoundsGEP(nullptr, Operand, NewIndices) + : Builder->CreateGEP(nullptr, Operand, NewIndices); if (NGEP->getType() == GEP.getType()) return ReplaceInstUsesWith(GEP, NGEP); @@ -1823,7 +1887,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users, case Instruction::BitCast: case Instruction::GetElementPtr: - Users.push_back(I); + Users.emplace_back(I); Worklist.push_back(I); continue; @@ -1832,7 +1896,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users, // We can fold eq/ne comparisons with null to false/true, respectively. 
if (!ICI->isEquality() || !isa(ICI->getOperand(1))) return false; - Users.push_back(I); + Users.emplace_back(I); continue; } @@ -1858,13 +1922,13 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users, case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: case Intrinsic::objectsize: - Users.push_back(I); + Users.emplace_back(I); continue; } } if (isFreeCall(I, TLI)) { - Users.push_back(I); + Users.emplace_back(I); continue; } return false; @@ -1873,7 +1937,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users, StoreInst *SI = cast(I); if (SI->isVolatile() || SI->getPointerOperand() != PI) return false; - Users.push_back(I); + Users.emplace_back(I); continue; } } @@ -2041,6 +2105,15 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { return &BI; } + // If the condition is irrelevant, remove the use so that other + // transforms on the condition become more effective. + if (BI.isConditional() && + BI.getSuccessor(0) == BI.getSuccessor(1) && + !isa(BI.getCondition())) { + BI.setCondition(UndefValue::get(BI.getCondition()->getType())); + return &BI; + } + // Canonicalize fcmp_one -> fcmp_oeq FCmpInst::Predicate FPred; Value *Y; if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), @@ -2078,6 +2151,40 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { Value *Cond = SI.getCondition(); + unsigned BitWidth = cast(Cond->getType())->getBitWidth(); + APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); + computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI); + unsigned LeadingKnownZeros = KnownZero.countLeadingOnes(); + unsigned LeadingKnownOnes = KnownOne.countLeadingOnes(); + + // Compute the number of leading bits we can ignore. + for (auto &C : SI.cases()) { + LeadingKnownZeros = std::min( + LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros()); + LeadingKnownOnes = std::min( + LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes()); + } + + unsigned NewWidth = BitWidth - std::max(LeadingKnownZeros, LeadingKnownOnes); + + // Truncate the condition operand if the new type is equal to or larger than + // the largest legal integer type. We need to be conservative here since + // x86 generates redundant zero-extension instructions if the operand is + // truncated to i8 or i16. + bool TruncCond = false; + if (NewWidth > 0 && BitWidth > NewWidth && + NewWidth >= DL.getLargestLegalIntTypeSize()) { + TruncCond = true; + IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth); + Builder->SetInsertPoint(&SI); + Value *NewCond = Builder->CreateTrunc(SI.getCondition(), Ty, "trunc"); + SI.setCondition(NewCond); + + for (auto &C : SI.cases()) + static_cast(&C)->setValue(ConstantInt::get( + SI.getContext(), C.getCaseValue()->getValue().trunc(NewWidth))); + } + if (Instruction *I = dyn_cast(Cond)) { if (I->getOpcode() == Instruction::Add) if (ConstantInt *AddRHS = dyn_cast(I->getOperand(1))) { @@ -2086,8 +2193,12 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) { ConstantInt* CaseVal = i.getCaseValue(); - Constant* NewCaseVal = ConstantExpr::getSub(cast(CaseVal), - AddRHS); + Constant *LHS = CaseVal; + if (TruncCond) + LHS = LeadingKnownZeros + ? 
ConstantExpr::getZExt(CaseVal, Cond->getType()) + : ConstantExpr::getSExt(CaseVal, Cond->getType()); + Constant* NewCaseVal = ConstantExpr::getSub(LHS, AddRHS); assert(isa(NewCaseVal) && "Result of expression should be constant"); i.setValue(cast(NewCaseVal)); @@ -2097,7 +2208,8 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { return &SI; } } - return nullptr; + + return TruncCond ? &SI : nullptr; } Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { @@ -2106,16 +2218,9 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { if (!EV.hasIndices()) return ReplaceInstUsesWith(EV, Agg); - if (Constant *C = dyn_cast(Agg)) { - if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) { - if (EV.getNumIndices() == 0) - return ReplaceInstUsesWith(EV, C2); - // Extract the remaining indices out of the constant indexed by the - // first index - return ExtractValueInst::Create(C2, EV.getIndices().slice(1)); - } - return nullptr; // Can't handle other constants - } + if (Value *V = + SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC)) + return ReplaceInstUsesWith(EV, V); if (InsertValueInst *IV = dyn_cast(Agg)) { // We're extracting from an insertvalue instruction, compare the indices @@ -2233,8 +2338,9 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { // We need to insert these at the location of the old load, not at that of // the extractvalue. - Builder->SetInsertPoint(L->getParent(), L); - Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices); + Builder->SetInsertPoint(L); + Value *GEP = Builder->CreateInBoundsGEP(L->getType(), + L->getPointerOperand(), Indices); // Returning the load directly will cause the main loop to insert it in // the wrong spot, so use ReplaceInstUsesWith(). return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); @@ -2250,41 +2356,28 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { return nullptr; } -enum Personality_Type { - Unknown_Personality, - GNU_Ada_Personality, - GNU_CXX_Personality, - GNU_ObjC_Personality -}; - -/// RecognizePersonality - See if the given exception handling personality -/// function is one that we understand. If so, return a description of it; -/// otherwise return Unknown_Personality. -static Personality_Type RecognizePersonality(Value *Pers) { - Function *F = dyn_cast(Pers->stripPointerCasts()); - if (!F) - return Unknown_Personality; - return StringSwitch(F->getName()) - .Case("__gnat_eh_personality", GNU_Ada_Personality) - .Case("__gxx_personality_v0", GNU_CXX_Personality) - .Case("__objc_personality_v0", GNU_ObjC_Personality) - .Default(Unknown_Personality); -} - -/// isCatchAll - Return 'true' if the given typeinfo will match anything. -static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) { +/// Return 'true' if the given typeinfo will match anything. +static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) { switch (Personality) { - case Unknown_Personality: + case EHPersonality::GNU_C: + // The GCC C EH personality only exists to support cleanups, so it's not + // clear what the semantics of catch clauses are. return false; - case GNU_Ada_Personality: + case EHPersonality::Unknown: + return false; + case EHPersonality::GNU_Ada: // While __gnat_all_others_value will match any Ada exception, it doesn't // match foreign exceptions (or didn't, before gcc-4.7). 
return false; - case GNU_CXX_Personality: - case GNU_ObjC_Personality: + case EHPersonality::GNU_CXX: + case EHPersonality::GNU_ObjC: + case EHPersonality::MSVC_X86SEH: + case EHPersonality::MSVC_Win64SEH: + case EHPersonality::MSVC_CXX: + case EHPersonality::CoreCLR: return TypeInfo->isNullValue(); } - llvm_unreachable("Unknown personality!"); + llvm_unreachable("invalid enum"); } static bool shorter_filter(const Value *LHS, const Value *RHS) { @@ -2298,7 +2391,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { // The logic here should be correct for any real-world personality function. // However if that turns out not to be true, the offending logic can always // be conditioned on the personality function, like the catch-all logic is. - Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn()); + EHPersonality Personality = + classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn()); // Simplify the list of clauses, eg by removing repeated catch clauses // (these are often created by inlining). @@ -2316,7 +2410,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { // If we already saw this clause, there is no point in having a second // copy of it. - if (AlreadyCaught.insert(TypeInfo)) { + if (AlreadyCaught.insert(TypeInfo).second) { // This catch clause was not already seen. NewClauses.push_back(CatchClause); } else { @@ -2398,7 +2492,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { continue; // There is no point in having multiple copies of the same typeinfo in // a filter, so only add it if we didn't already. - if (SeenInFilter.insert(TypeInfo)) + if (SeenInFilter.insert(TypeInfo).second) NewFilterElts.push_back(cast(Elt)); } // A filter containing a catch-all cannot match anything by definition. @@ -2565,7 +2659,6 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { // with a new one. if (MakeNewInstruction) { LandingPadInst *NLI = LandingPadInst::Create(LI.getType(), - LI.getPersonalityFn(), NewClauses.size()); for (unsigned i = 0, e = NewClauses.size(); i != e; ++i) NLI->addClause(NewClauses[i]); @@ -2589,18 +2682,15 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { return nullptr; } - - - -/// TryToSinkInstruction - Try to move the specified instruction from its -/// current block into the beginning of DestBlock, which can only happen if it's -/// safe to move the instruction past all of the instructions between it and the -/// end of its block. +/// Try to move the specified instruction from its current block into the +/// beginning of DestBlock, which can only happen if it's safe to move the +/// instruction past all of the instructions between it and the end of its +/// block. static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { assert(I->hasOneUse() && "Invariants didn't hold!"); // Cannot move control-flow-involving, volatile loads, vaarg, etc. - if (isa(I) || isa(I) || I->mayHaveSideEffects() || + if (isa(I) || I->isEHPad() || I->mayHaveSideEffects() || isa(I)) return false; @@ -2609,178 +2699,29 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { &DestBlock->getParent()->getEntryBlock()) return false; + // Do not sink convergent call instructions. + if (auto *CI = dyn_cast(I)) { + if (CI->isConvergent()) + return false; + } + // We can only sink load instructions if there is nothing between the load and // the end of block that could change the value. 
if (I->mayReadFromMemory()) { - for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); + for (BasicBlock::iterator Scan = I->getIterator(), + E = I->getParent()->end(); Scan != E; ++Scan) if (Scan->mayWriteToMemory()) return false; } BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); - I->moveBefore(InsertPos); + I->moveBefore(&*InsertPos); ++NumSunkInst; return true; } - -/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding -/// all reachable code to the worklist. -/// -/// This has a couple of tricks to make the code faster and more powerful. In -/// particular, we constant fold and DCE instructions as we go, to avoid adding -/// them to the worklist (this significantly speeds up instcombine on code where -/// many instructions are dead or constant). Additionally, if we find a branch -/// whose condition is a known constant, we only visit the reachable successors. -/// -static bool AddReachableCodeToWorklist(BasicBlock *BB, - SmallPtrSetImpl &Visited, - InstCombiner &IC, - const DataLayout *DL, - const TargetLibraryInfo *TLI) { - bool MadeIRChange = false; - SmallVector Worklist; - Worklist.push_back(BB); - - SmallVector InstrsForInstCombineWorklist; - DenseMap FoldedConstants; - - do { - BB = Worklist.pop_back_val(); - - // We have now visited this block! If we've already been here, ignore it. - if (!Visited.insert(BB)) continue; - - for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { - Instruction *Inst = BBI++; - - // DCE instruction if trivially dead. - if (isInstructionTriviallyDead(Inst, TLI)) { - ++NumDeadInst; - DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); - Inst->eraseFromParent(); - continue; - } - - // ConstantProp instruction if trivially constant. - if (!Inst->use_empty() && isa(Inst->getOperand(0))) - if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) { - DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " - << *Inst << '\n'); - Inst->replaceAllUsesWith(C); - ++NumConstProp; - Inst->eraseFromParent(); - continue; - } - - if (DL) { - // See if we can constant fold its operands. - for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); - i != e; ++i) { - ConstantExpr *CE = dyn_cast(i); - if (CE == nullptr) continue; - - Constant*& FoldRes = FoldedConstants[CE]; - if (!FoldRes) - FoldRes = ConstantFoldConstantExpression(CE, DL, TLI); - if (!FoldRes) - FoldRes = CE; - - if (FoldRes != CE) { - *i = FoldRes; - MadeIRChange = true; - } - } - } - - InstrsForInstCombineWorklist.push_back(Inst); - } - - // Recursively visit successors. If this is a branch or switch on a - // constant, only visit the reachable successor. - TerminatorInst *TI = BB->getTerminator(); - if (BranchInst *BI = dyn_cast(TI)) { - if (BI->isConditional() && isa(BI->getCondition())) { - bool CondVal = cast(BI->getCondition())->getZExtValue(); - BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); - Worklist.push_back(ReachableBB); - continue; - } - } else if (SwitchInst *SI = dyn_cast(TI)) { - if (ConstantInt *Cond = dyn_cast(SI->getCondition())) { - // See if this is an explicit destination. - for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); - i != e; ++i) - if (i.getCaseValue() == Cond) { - BasicBlock *ReachableBB = i.getCaseSuccessor(); - Worklist.push_back(ReachableBB); - continue; - } - - // Otherwise it is the default destination. 
- Worklist.push_back(SI->getDefaultDest()); - continue; - } - } - - for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) - Worklist.push_back(TI->getSuccessor(i)); - } while (!Worklist.empty()); - - // Once we've found all of the instructions to add to instcombine's worklist, - // add them in reverse order. This way instcombine will visit from the top - // of the function down. This jives well with the way that it adds all uses - // of instructions to the worklist after doing a transformation, thus avoiding - // some N^2 behavior in pathological cases. - IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], - InstrsForInstCombineWorklist.size()); - - return MadeIRChange; -} - -bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { - MadeIRChange = false; - - DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " - << F.getName() << "\n"); - - { - // Do a depth-first traversal of the function, populate the worklist with - // the reachable instructions. Ignore blocks that are not reachable. Keep - // track of which blocks we visit. - SmallPtrSet Visited; - MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL, - TLI); - - // Do a quick scan over the function. If we find any blocks that are - // unreachable, remove any instructions inside of them. This prevents - // the instcombine code from having to deal with some bad special cases. - for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { - if (Visited.count(BB)) continue; - - // Delete the instructions backwards, as it has a reduced likelihood of - // having to update as many def-use and use-def chains. - Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. - while (EndInst != BB->begin()) { - // Delete the next to last instruction. - BasicBlock::iterator I = EndInst; - Instruction *Inst = --I; - if (!Inst->use_empty()) - Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); - if (isa(Inst)) { - EndInst = Inst; - continue; - } - if (!isa(Inst)) { - ++NumDeadInst; - MadeIRChange = true; - } - Inst->eraseFromParent(); - } - } - } - +bool InstCombiner::run() { while (!Worklist.isEmpty()) { Instruction *I = Worklist.RemoveOne(); if (I == nullptr) continue; // skip null values. @@ -2795,7 +2736,8 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { } // Instruction isn't dead, see if we can constant propagate it. - if (!I->use_empty() && isa(I->getOperand(0))) + if (!I->use_empty() && + (I->getNumOperands() == 0 || isa(I->getOperand(0)))) { if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) { DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); @@ -2806,6 +2748,28 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { MadeIRChange = true; continue; } + } + + // In general, it is possible for computeKnownBits to determine all bits in a + // value even when the operands are not all constants. + if (!I->use_empty() && I->getType()->isIntegerTy()) { + unsigned BitWidth = I->getType()->getScalarSizeInBits(); + APInt KnownZero(BitWidth, 0); + APInt KnownOne(BitWidth, 0); + computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I); + if ((KnownZero | KnownOne).isAllOnesValue()) { + Constant *C = ConstantInt::get(I->getContext(), KnownOne); + DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C << + " from: " << *I << '\n'); + + // Add operands to the worklist. 
+ ReplaceInstUsesWith(*I, C); + ++NumConstProp; + EraseInstFromFunction(*I); + MadeIRChange = true; + continue; + } + } // See if we can trivially sink this instruction to a successor basic block. if (I->hasOneUse()) { @@ -2847,7 +2811,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { } // Now that we have an instruction, try combining it to simplify it. - Builder->SetInsertPoint(I->getParent(), I); + Builder->SetInsertPoint(I); Builder->SetCurrentDebugLocation(I->getDebugLoc()); #ifndef NDEBUG @@ -2863,7 +2827,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { DEBUG(dbgs() << "IC: Old = " << *I << '\n' << " New = " << *Result << '\n'); - if (!I->getDebugLoc().isUnknown()) + if (I->getDebugLoc()) Result->setDebugLoc(I->getDebugLoc()); // Everything uses the new instruction now. I->replaceAllUsesWith(Result); @@ -2877,7 +2841,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { // Insert the new instruction into the basic block... BasicBlock *InstParent = I->getParent(); - BasicBlock::iterator InsertPos = I; + BasicBlock::iterator InsertPos = I->getIterator(); // If we replace a PHI with something that isn't a PHI, fix up the // insertion point. @@ -2910,64 +2874,293 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { return MadeIRChange; } +/// Walk the function in depth-first order, adding all reachable code to the +/// worklist. +/// +/// This has a couple of tricks to make the code faster and more powerful. In +/// particular, we constant fold and DCE instructions as we go, to avoid adding +/// them to the worklist (this significantly speeds up instcombine on code where +/// many instructions are dead or constant). Additionally, if we find a branch +/// whose condition is a known constant, we only visit the reachable successors. +/// +static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL, + SmallPtrSetImpl &Visited, + InstCombineWorklist &ICWorklist, + const TargetLibraryInfo *TLI) { + bool MadeIRChange = false; + SmallVector Worklist; + Worklist.push_back(BB); + + SmallVector InstrsForInstCombineWorklist; + DenseMap FoldedConstants; + + do { + BB = Worklist.pop_back_val(); + + // We have now visited this block! If we've already been here, ignore it. + if (!Visited.insert(BB).second) + continue; + + for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { + Instruction *Inst = &*BBI++; + + // DCE instruction if trivially dead. + if (isInstructionTriviallyDead(Inst, TLI)) { + ++NumDeadInst; + DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); + Inst->eraseFromParent(); + continue; + } + + // ConstantProp instruction if trivially constant. + if (!Inst->use_empty() && + (Inst->getNumOperands() == 0 || isa(Inst->getOperand(0)))) + if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) { + DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " + << *Inst << '\n'); + Inst->replaceAllUsesWith(C); + ++NumConstProp; + Inst->eraseFromParent(); + continue; + } + + // See if we can constant fold its operands. + for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); i != e; + ++i) { + ConstantExpr *CE = dyn_cast(i); + if (CE == nullptr) + continue; + + Constant *&FoldRes = FoldedConstants[CE]; + if (!FoldRes) + FoldRes = ConstantFoldConstantExpression(CE, DL, TLI); + if (!FoldRes) + FoldRes = CE; + + if (FoldRes != CE) { + *i = FoldRes; + MadeIRChange = true; + } + } + + InstrsForInstCombineWorklist.push_back(Inst); + } + + // Recursively visit successors. 
If this is a branch or switch on a + // constant, only visit the reachable successor. + TerminatorInst *TI = BB->getTerminator(); + if (BranchInst *BI = dyn_cast(TI)) { + if (BI->isConditional() && isa(BI->getCondition())) { + bool CondVal = cast(BI->getCondition())->getZExtValue(); + BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); + Worklist.push_back(ReachableBB); + continue; + } + } else if (SwitchInst *SI = dyn_cast(TI)) { + if (ConstantInt *Cond = dyn_cast(SI->getCondition())) { + // See if this is an explicit destination. + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); + i != e; ++i) + if (i.getCaseValue() == Cond) { + BasicBlock *ReachableBB = i.getCaseSuccessor(); + Worklist.push_back(ReachableBB); + continue; + } + + // Otherwise it is the default destination. + Worklist.push_back(SI->getDefaultDest()); + continue; + } + } + + for (BasicBlock *SuccBB : TI->successors()) + Worklist.push_back(SuccBB); + } while (!Worklist.empty()); + + // Once we've found all of the instructions to add to instcombine's worklist, + // add them in reverse order. This way instcombine will visit from the top + // of the function down. This jives well with the way that it adds all uses + // of instructions to the worklist after doing a transformation, thus avoiding + // some N^2 behavior in pathological cases. + ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist); + + return MadeIRChange; +} + +/// \brief Populate the IC worklist from a function, and prune any dead basic +/// blocks discovered in the process. +/// +/// This also does basic constant propagation and other forward fixing to make +/// the combiner itself run much faster. +static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL, + TargetLibraryInfo *TLI, + InstCombineWorklist &ICWorklist) { + bool MadeIRChange = false; + + // Do a depth-first traversal of the function, populate the worklist with + // the reachable instructions. Ignore blocks that are not reachable. Keep + // track of which blocks we visit. + SmallPtrSet Visited; + MadeIRChange |= + AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI); + + // Do a quick scan over the function. If we find any blocks that are + // unreachable, remove any instructions inside of them. This prevents + // the instcombine code from having to deal with some bad special cases. + for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { + if (Visited.count(&*BB)) + continue; + + // Delete the instructions backwards, as it has a reduced likelihood of + // having to update as many def-use and use-def chains. + Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. + while (EndInst != BB->begin()) { + // Delete the next to last instruction. + Instruction *Inst = &*--EndInst->getIterator(); + if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) + Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); + if (Inst->isEHPad()) { + EndInst = Inst; + continue; + } + if (!isa(Inst)) { + ++NumDeadInst; + MadeIRChange = true; + } + if (!Inst->getType()->isTokenTy()) + Inst->eraseFromParent(); + } + } + + return MadeIRChange; +} + +static bool +combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist, + AliasAnalysis *AA, AssumptionCache &AC, + TargetLibraryInfo &TLI, DominatorTree &DT, + LoopInfo *LI = nullptr) { + auto &DL = F.getParent()->getDataLayout(); + + /// Builder - This is an IRBuilder that automatically inserts new + /// instructions into the worklist when they are created. 
+ IRBuilder Builder( + F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC)); + + // Lower dbg.declare intrinsics otherwise their value may be clobbered + // by instcombiner. + bool DbgDeclaresChanged = LowerDbgDeclare(F); + + // Iterate while there is work to do. + int Iteration = 0; + for (;;) { + ++Iteration; + DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " + << F.getName() << "\n"); + + bool Changed = false; + if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist)) + Changed = true; + + InstCombiner IC(Worklist, &Builder, F.optForMinSize(), + AA, &AC, &TLI, &DT, DL, LI); + if (IC.run()) + Changed = true; + + if (!Changed) + break; + } + + return DbgDeclaresChanged || Iteration > 1; +} + +PreservedAnalyses InstCombinePass::run(Function &F, + AnalysisManager *AM) { + auto &AC = AM->getResult(F); + auto &DT = AM->getResult(F); + auto &TLI = AM->getResult(F); + + auto *LI = AM->getCachedResult(F); + + // FIXME: The AliasAnalysis is not yet supported in the new pass manager + if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT, LI)) + // No changes, all analyses are preserved. + return PreservedAnalyses::all(); + + // Mark all the analyses that instcombine updates as preserved. + // FIXME: Need a way to preserve CFG analyses here! + PreservedAnalyses PA; + PA.preserve(); + return PA; +} + namespace { -class InstCombinerLibCallSimplifier final : public LibCallSimplifier { - InstCombiner *IC; +/// \brief The legacy pass manager's instcombine pass. +/// +/// This is a basic whole-function wrapper around the instcombine utility. It +/// will try to combine all instructions in the function. +class InstructionCombiningPass : public FunctionPass { + InstCombineWorklist Worklist; + public: - InstCombinerLibCallSimplifier(const DataLayout *DL, - const TargetLibraryInfo *TLI, - InstCombiner *IC) - : LibCallSimplifier(DL, TLI, EnableUnsafeFPShrink) { - this->IC = IC; - } + static char ID; // Pass identification, replacement for typeid - /// replaceAllUsesWith - override so that instruction replacement - /// can be defined in terms of the instruction combiner framework. - void replaceAllUsesWith(Instruction *I, Value *With) const override { - IC->ReplaceInstUsesWith(*I, With); + InstructionCombiningPass() : FunctionPass(ID) { + initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry()); } + + void getAnalysisUsage(AnalysisUsage &AU) const override; + bool runOnFunction(Function &F) override; }; } -bool InstCombiner::runOnFunction(Function &F) { +void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + AU.addPreserved(); + AU.addPreserved(); +} + +bool InstructionCombiningPass::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; - AT = &getAnalysis(); - DataLayoutPass *DLP = getAnalysisIfAvailable(); - DL = DLP ? &DLP->getDataLayout() : nullptr; - DT = &getAnalysis().getDomTree(); - TLI = &getAnalysis(); - - // Minimizing size? - MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, - Attribute::MinSize); - - /// Builder - This is an IRBuilder that automatically inserts new - /// instructions into the worklist when they are created. - IRBuilder - TheBuilder(F.getContext(), TargetFolder(DL), - InstCombineIRInserter(Worklist, AT)); - Builder = &TheBuilder; + // Required analyses. 
+  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
 
-  InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
-  Simplifier = &TheSimplifier;
+  // Optional analyses.
+  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
+  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
 
-  bool EverMadeChange = false;
+  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT, LI);
+}
 
-  // Lower dbg.declare intrinsics otherwise their value may be clobbered
-  // by instcombiner.
-  EverMadeChange = LowerDbgDeclare(F);
+char InstructionCombiningPass::ID = 0;
+INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
+                      "Combine redundant instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
+INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
+                    "Combine redundant instructions", false, false)
 
-  // Iterate while there is work to do.
-  unsigned Iteration = 0;
-  while (DoOneIteration(F, Iteration++))
-    EverMadeChange = true;
+// Initialization Routines
+void llvm::initializeInstCombine(PassRegistry &Registry) {
+  initializeInstructionCombiningPassPass(Registry);
+}
 
-  Builder = nullptr;
-  return EverMadeChange;
+void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
+  initializeInstructionCombiningPassPass(*unwrap(R));
 }
 
 FunctionPass *llvm::createInstructionCombiningPass() {
-  return new InstCombiner();
+  return new InstructionCombiningPass();
 }
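
The hunk added to InstCombiner::run() above folds an instruction to a constant whenever computeKnownBits can pin down every bit, even when the operands are not all constants. A minimal standalone sketch of that test, assuming a 64-bit value and using invented names (KnownBits64, tryFoldToConstant) rather than the LLVM APIs:

#include <cstdint>
#include <cstdio>
#include <optional>

// Sketch only: a known-zero/known-one mask pair for one 64-bit value.
struct KnownBits64 {
  uint64_t Zero = 0; // bits proven to be 0
  uint64_t One = 0;  // bits proven to be 1
};

// Known bits of (L | R): a result bit is known 1 if either input bit is
// known 1, and known 0 only if both input bits are known 0.
KnownBits64 knownBitsForOr(KnownBits64 L, KnownBits64 R) {
  return {L.Zero & R.Zero, L.One | R.One};
}

// The test the patch performs: if every bit is covered by one of the masks,
// the value is the constant formed by the known-one bits.
std::optional<uint64_t> tryFoldToConstant(KnownBits64 K) {
  if ((K.Zero | K.One) == ~0ULL)
    return K.One;
  return std::nullopt;
}

int main() {
  KnownBits64 X{~0xFFULL, 0};       // unknown low byte, high bits known zero
  KnownBits64 C{~0xFFULL, 0xFFULL}; // the constant 0xFF
  // X | 0xFF is fully known (0xFF) even though X itself is not a constant.
  if (auto Folded = tryFoldToConstant(knownBitsForOr(X, C)))
    std::printf("folded to 0x%llx\n", (unsigned long long)*Folded);
}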
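
AddReachableCodeToWorklist, defined above, seeds the combiner with a depth-first walk that refuses to follow edges it can prove dead: when a conditional branch or switch tests a constant, only the taken successor is queued. The same strategy in a self-contained form, with a toy CFG and invented names (Block, reachableBlocks) instead of the LLVM IR classes:

#include <cstdio>
#include <optional>
#include <unordered_set>
#include <vector>

struct Block {
  int ID;
  std::optional<bool> ConstCond; // set when the terminator's condition is a known constant
  std::vector<Block *> Succs;    // Succs[0] = taken on true, Succs[1] = taken on false
};

std::unordered_set<Block *> reachableBlocks(Block *Entry) {
  std::unordered_set<Block *> Visited;
  std::vector<Block *> Worklist{Entry};
  while (!Worklist.empty()) {
    Block *BB = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(BB).second)
      continue; // already visited
    if (BB->ConstCond && BB->Succs.size() == 2) {
      // A constant condition selects exactly one live edge; skip the other.
      Worklist.push_back(BB->Succs[*BB->ConstCond ? 0 : 1]);
      continue;
    }
    for (Block *Succ : BB->Succs)
      Worklist.push_back(Succ);
  }
  return Visited;
}

int main() {
  Block D{3, {}, {}}, C{2, {}, {}}, B{1, {}, {&D}}, A{0, true, {&B, &C}};
  auto Reachable = reachableBlocks(&A);
  std::printf("reachable blocks: %zu\n", Reachable.size()); // A, B, D; C is pruned
}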
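
combineInstructionsOverFunction, also above, drives everything as a fixpoint loop: rebuild the worklist, run the combiner, and stop after the first iteration that changes nothing, reporting a change only if more than one iteration was needed. A compact sketch of that control flow over a toy "IR" (a vector of integers) with an invented rewrite rule; only the loop structure mirrors the patch, and the dbg.declare handling is omitted:

#include <cstdio>
#include <vector>

// Toy rewrite: halve every nonzero even value. Stands in for one round of
// prepareICWorklistFromFunction followed by InstCombiner::run on real IR.
static bool runOneCombine(std::vector<int> &Values) {
  bool Changed = false;
  for (int &V : Values)
    if (V != 0 && V % 2 == 0) {
      V /= 2;
      Changed = true;
    }
  return Changed;
}

static bool combineToFixpoint(std::vector<int> &Values) {
  int Iteration = 0;
  for (;;) {
    ++Iteration;
    std::printf("ITERATION #%d\n", Iteration);
    if (!runOneCombine(Values)) // nothing changed: fixpoint reached
      break;
  }
  // As in the driver above (minus the dbg.declare term), a change is
  // reported iff a second iteration was ever entered.
  return Iteration > 1;
}

int main() {
  std::vector<int> Values{8, 3, 6};
  std::printf("changed: %d\n", combineToFixpoint(Values)); // several iterations, then stable
}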