X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FUtils%2FSimplifyCFG.cpp;h=14a76d7802eb6cea650fb3ffe34e81d0c20dd4f5;hp=a01f9b0ae6bcb62008b5959b9c886e58772e1a4f;hb=83fa9b85da9b94a331bc2abd545b410e516ac6e5;hpb=5e49dcf79a0f85f698c4309a185df79e170b05e2
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index a01f9b0ae6b..14a76d7802e 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -14,11 +14,13 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -43,7 +45,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
 #include <algorithm>
 #include <map>
 #include <set>
@@ -52,9 +54,13 @@ using namespace PatternMatch;
 
 #define DEBUG_TYPE "simplifycfg"
 
+// Chosen as 2 so as to be cheap, but still to have enough power to fold
+// a select, so the "clamp" idiom (of a min followed by a max) will be caught.
+// To catch this, we need to fold a compare and a select, hence '2' being the
+// minimum reasonable default.
 static cl::opt<unsigned>
-PHINodeFoldingThreshold("phi-node-folding-threshold", cl::Hidden, cl::init(1),
-   cl::desc("Control the amount of phi node folding to perform (default = 1)"));
+PHINodeFoldingThreshold("phi-node-folding-threshold", cl::Hidden, cl::init(2),
+   cl::desc("Control the amount of phi node folding to perform (default = 2)"));
 
 static cl::opt<bool> DupRet(
     "simplifycfg-dup-ret", cl::Hidden, cl::init(false),
@@ -68,13 +74,41 @@ static cl::opt<bool> HoistCondStores(
     "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
     cl::desc("Hoist conditional stores if an unconditional store precedes"));
 
+static cl::opt<bool> MergeCondStores(
+    "simplifycfg-merge-cond-stores", cl::Hidden, cl::init(true),
+    cl::desc("Hoist conditional stores even if an unconditional store does not "
+             "precede - hoist multiple conditional stores into a single "
+             "predicated store"));
+
+static cl::opt<bool> MergeCondStoresAggressively(
+    "simplifycfg-merge-cond-stores-aggressively", cl::Hidden, cl::init(false),
+    cl::desc("When merging conditional stores, do so even if the resultant "
+             "basic blocks are unlikely to be if-converted as a result"));
+
+static cl::opt<bool> SpeculateOneExpensiveInst(
+    "speculate-one-expensive-inst", cl::Hidden, cl::init(true),
+    cl::desc("Allow exactly one expensive instruction to be speculatively "
+             "executed"));
+
 STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
+STATISTIC(NumLinearMaps, "Number of switch instructions turned into linear mapping");
 STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
 STATISTIC(NumLookupTablesHoles, "Number of switch instructions turned into lookup tables (holes checked)");
+STATISTIC(NumTableCmpReuses, "Number of reused switch table lookup compares");
 STATISTIC(NumSinkCommons, "Number of common instructions sunk down to the end block");
 STATISTIC(NumSpeculations, "Number of speculative executed instructions");
 
 namespace {
+  // The first field contains the value that the switch produces when a certain
+  // case group is selected, and the second field is a vector containing the
+  // cases composing the case group.
+  typedef SmallVector<std::pair<Constant *, SmallVector<ConstantInt *, 4>>, 2>
+      SwitchCaseResultVectorTy;
+  // The first field contains the phi node that generates a result of the switch
+  // and the second field contains the value generated for a certain case in the
+  // switch for that PHI.
+  typedef SmallVector<std::pair<PHINode *, Constant *>, 4> SwitchCaseResultsTy;
+
   /// ValueEqualityComparisonCase - Represents a case of a switch.
   struct ValueEqualityComparisonCase {
     ConstantInt *Value;
@@ -93,8 +127,9 @@ namespace {
 
 class SimplifyCFGOpt {
   const TargetTransformInfo &TTI;
-  const DataLayout *const DL;
-  AssumptionTracker *AT;
+  const DataLayout &DL;
+  unsigned BonusInstThreshold;
+  AssumptionCache *AC;
   Value *isValueEqualityComparison(TerminatorInst *TI);
   BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
                                std::vector<ValueEqualityComparisonCase> &Cases);
@@ -106,6 +141,9 @@ class SimplifyCFGOpt {
 
   bool SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder);
   bool SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder);
+  bool SimplifySingleResume(ResumeInst *RI);
+  bool SimplifyCommonResume(ResumeInst *RI);
+  bool SimplifyCleanupReturn(CleanupReturnInst *RI);
   bool SimplifyUnreachable(UnreachableInst *UI);
   bool SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder);
   bool SimplifyIndirectBr(IndirectBrInst *IBI);
@@ -113,16 +151,15 @@ class SimplifyCFGOpt {
   bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
 
 public:
-  SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL,
-                 AssumptionTracker *AT)
-      : TTI(TTI), DL(DL), AT(AT) {}
+  SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout &DL,
+                 unsigned BonusInstThreshold, AssumptionCache *AC)
+      : TTI(TTI), DL(DL), BonusInstThreshold(BonusInstThreshold), AC(AC) {}
   bool run(BasicBlock *BB);
 };
 }
 
-/// SafeToMergeTerminators - Return true if it is safe to merge these two
+/// Return true if it is safe to merge these two
 /// terminator instructions together.
-///
 static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
   if (SI1 == SI2) return false;  // Can't merge with self!
 
@@ -146,11 +183,9 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
   return true;
 }
 
-/// isProfitableToFoldUnconditional - Return true if it is safe and profitable
-/// to merge these two terminator instructions together, where SI1 is an
-/// unconditional branch. PhiNodes will store all PHI nodes in common
-/// successors.
-///
+/// Return true if it is safe and profitable to merge these two terminator
+/// instructions together, where SI1 is an unconditional branch. PhiNodes will
+/// store all PHI nodes in common successors.
 static bool isProfitableToFoldUnconditional(BranchInst *SI1,
                                             BranchInst *SI2,
                                             Instruction *Cond,
@@ -187,10 +222,10 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1,
   return true;
 }
 
-/// AddPredecessorToBlock - Update PHI nodes in Succ to indicate that there will
-/// now be entries in it from the 'NewPred' block. The values that will be
-/// flowing into the PHI nodes will be the same as those coming in from
-/// ExistPred, an existing predecessor of Succ.
+/// Update PHI nodes in Succ to indicate that there will now be entries in it
+/// from the 'NewPred' block. The values that will be flowing into the PHI nodes
+/// will be the same as those coming in from ExistPred, an existing predecessor
+/// of Succ.
static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred, BasicBlock *ExistPred) { if (!isa(Succ->begin())) return; // Quick exit if nothing to do @@ -201,48 +236,19 @@ static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred, PN->addIncoming(PN->getIncomingValueForBlock(ExistPred), NewPred); } -/// ComputeSpeculationCost - Compute an abstract "cost" of speculating the -/// given instruction, which is assumed to be safe to speculate. 1 means -/// cheap, 2 means less cheap, and UINT_MAX means prohibitively expensive. -static unsigned ComputeSpeculationCost(const User *I, const DataLayout *DL) { - assert(isSafeToSpeculativelyExecute(I, DL) && +/// Compute an abstract "cost" of speculating the given instruction, +/// which is assumed to be safe to speculate. TCC_Free means cheap, +/// TCC_Basic means less cheap, and TCC_Expensive means prohibitively +/// expensive. +static unsigned ComputeSpeculationCost(const User *I, + const TargetTransformInfo &TTI) { + assert(isSafeToSpeculativelyExecute(I) && "Instruction is not safe to speculatively execute!"); - switch (Operator::getOpcode(I)) { - default: - // In doubt, be conservative. - return UINT_MAX; - case Instruction::GetElementPtr: - // GEPs are cheap if all indices are constant. - if (!cast(I)->hasAllConstantIndices()) - return UINT_MAX; - return 1; - case Instruction::ExtractValue: - case Instruction::Load: - case Instruction::Add: - case Instruction::Sub: - case Instruction::And: - case Instruction::Or: - case Instruction::Xor: - case Instruction::Shl: - case Instruction::LShr: - case Instruction::AShr: - case Instruction::ICmp: - case Instruction::Trunc: - case Instruction::ZExt: - case Instruction::SExt: - case Instruction::BitCast: - case Instruction::ExtractElement: - case Instruction::InsertElement: - return 1; // These are all cheap. - - case Instruction::Call: - case Instruction::Select: - return 2; - } + return TTI.getUserCost(I); } -/// DominatesMergePoint - If we have a merge point of an "if condition" as -/// accepted above, return true if the specified value dominates the block. We +/// If we have a merge point of an "if condition" as accepted above, +/// return true if the specified value dominates the block. We /// don't handle the true generality of domination here, just a special case /// which works well enough for us. /// @@ -261,7 +267,8 @@ static unsigned ComputeSpeculationCost(const User *I, const DataLayout *DL) { static bool DominatesMergePoint(Value *V, BasicBlock *BB, SmallPtrSetImpl *AggressiveInsts, unsigned &CostRemaining, - const DataLayout *DL) { + const TargetTransformInfo &TTI, + unsigned Depth = 0) { Instruction *I = dyn_cast(V); if (!I) { // Non-instructions all dominate instructions, but not all constantexprs @@ -294,37 +301,46 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB, // Okay, it looks like the instruction IS in the "condition". Check to // see if it's a cheap instruction to unconditionally compute, and if it // only uses stuff defined outside of the condition. If so, hoist it out. - if (!isSafeToSpeculativelyExecute(I, DL)) + if (!isSafeToSpeculativelyExecute(I)) return false; - unsigned Cost = ComputeSpeculationCost(I, DL); + unsigned Cost = ComputeSpeculationCost(I, TTI); - if (Cost > CostRemaining) + // Allow exactly one instruction to be speculated regardless of its cost + // (as long as it is safe to do so). + // This is intended to flatten the CFG even if the instruction is a division + // or other expensive operation. 
The speculation of an expensive instruction + // is expected to be undone in CodeGenPrepare if the speculation has not + // enabled further IR optimizations. + if (Cost > CostRemaining && + (!SpeculateOneExpensiveInst || !AggressiveInsts->empty() || Depth > 0)) return false; - CostRemaining -= Cost; + // Avoid unsigned wrap. + CostRemaining = (Cost > CostRemaining) ? 0 : CostRemaining - Cost; // Okay, we can only really hoist these out if their operands do // not take us over the cost threshold. for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) - if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining, DL)) + if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining, TTI, + Depth + 1)) return false; // Okay, it's safe to do this! Remember this instruction. AggressiveInsts->insert(I); return true; } -/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr +/// Extract ConstantInt from value, looking through IntToPtr /// and PointerNullValue. Return NULL if value is not a constant int. -static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) { +static ConstantInt *GetConstantInt(Value *V, const DataLayout &DL) { // Normal constant int. ConstantInt *CI = dyn_cast(V); - if (CI || !DL || !isa(V) || !V->getType()->isPointerTy()) + if (CI || !isa(V) || !V->getType()->isPointerTy()) return CI; // This is some kind of pointer constant. Turn it into a pointer-sized // ConstantInt if possible. - IntegerType *PtrTy = cast(DL->getIntPtrType(V->getType())); + IntegerType *PtrTy = cast(DL.getIntPtrType(V->getType())); // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*). if (isa(V)) @@ -344,114 +360,176 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) { return nullptr; } -/// GatherConstantCompares - Given a potentially 'or'd or 'and'd together -/// collection of icmp eq/ne instructions that compare a value against a -/// constant, return the value being compared, and stick the constant into the -/// Values vector. -static Value * -GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, - const DataLayout *DL, bool isEQ, unsigned &UsedICmps) { - Instruction *I = dyn_cast(V); - if (!I) return nullptr; - - // If this is an icmp against a constant, handle this as one of the cases. - if (ICmpInst *ICI = dyn_cast(I)) { - if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) { - Value *RHSVal; - ConstantInt *RHSC; - - if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ:ICmpInst::ICMP_NE)) { - // (x & ~2^x) == y --> x == y || x == y|2^x - // This undoes a transformation done by instcombine to fuse 2 compares. - if (match(ICI->getOperand(0), - m_And(m_Value(RHSVal), m_ConstantInt(RHSC)))) { - APInt Not = ~RHSC->getValue(); - if (Not.isPowerOf2()) { - Vals.push_back(C); - Vals.push_back( - ConstantInt::get(C->getContext(), C->getValue() | Not)); - UsedICmps++; - return RHSVal; - } - } +namespace { + +/// Given a chain of or (||) or and (&&) comparison of a value against a +/// constant, this will try to recover the information required for a switch +/// structure. +/// It will depth-first traverse the chain of comparison, seeking for patterns +/// like %a == 12 or %a < 4 and combine them to produce a set of integer +/// representing the different cases for the switch. +/// Note that if the chain is composed of '||' it will build the set of elements +/// that matches the comparisons (i.e. 
any of this value validate the chain) +/// while for a chain of '&&' it will build the set elements that make the test +/// fail. +struct ConstantComparesGatherer { + const DataLayout &DL; + Value *CompValue; /// Value found for the switch comparison + Value *Extra; /// Extra clause to be checked before the switch + SmallVector Vals; /// Set of integers to match in switch + unsigned UsedICmps; /// Number of comparisons matched in the and/or chain + + /// Construct and compute the result for the comparison instruction Cond + ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL) + : DL(DL), CompValue(nullptr), Extra(nullptr), UsedICmps(0) { + gather(Cond); + } + + /// Prevent copy + ConstantComparesGatherer(const ConstantComparesGatherer &) = delete; + ConstantComparesGatherer & + operator=(const ConstantComparesGatherer &) = delete; + +private: - UsedICmps++; - Vals.push_back(C); - return I->getOperand(0); + /// Try to set the current value used for the comparison, it succeeds only if + /// it wasn't set before or if the new value is the same as the old one + bool setValueOnce(Value *NewVal) { + if(CompValue && CompValue != NewVal) return false; + CompValue = NewVal; + return (CompValue != nullptr); + } + + /// Try to match Instruction "I" as a comparison against a constant and + /// populates the array Vals with the set of values that match (or do not + /// match depending on isEQ). + /// Return false on failure. On success, the Value the comparison matched + /// against is placed in CompValue. + /// If CompValue is already set, the function is expected to fail if a match + /// is found but the value compared to is different. + bool matchInstruction(Instruction *I, bool isEQ) { + // If this is an icmp against a constant, handle this as one of the cases. + ICmpInst *ICI; + ConstantInt *C; + if (!((ICI = dyn_cast(I)) && + (C = GetConstantInt(I->getOperand(1), DL)))) { + return false; + } + + Value *RHSVal; + ConstantInt *RHSC; + + // Pattern match a special case + // (x & ~2^x) == y --> x == y || x == y|2^x + // This undoes a transformation done by instcombine to fuse 2 compares. + if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ:ICmpInst::ICMP_NE)) { + if (match(ICI->getOperand(0), + m_And(m_Value(RHSVal), m_ConstantInt(RHSC)))) { + APInt Not = ~RHSC->getValue(); + if (Not.isPowerOf2()) { + // If we already have a value for the switch, it has to match! + if(!setValueOnce(RHSVal)) + return false; + + Vals.push_back(C); + Vals.push_back(ConstantInt::get(C->getContext(), + C->getValue() | Not)); + UsedICmps++; + return true; + } } - // If we have "x ult 3" comparison, for example, then we can add 0,1,2 to - // the set. - ConstantRange Span = - ConstantRange::makeICmpRegion(ICI->getPredicate(), C->getValue()); - - // Shift the range if the compare is fed by an add. This is the range - // compare idiom as emitted by instcombine. - bool hasAdd = - match(I->getOperand(0), m_Add(m_Value(RHSVal), m_ConstantInt(RHSC))); - if (hasAdd) - Span = Span.subtract(RHSC->getValue()); - - // If this is an and/!= check then we want to optimize "x ugt 2" into - // x != 0 && x != 1. - if (!isEQ) - Span = Span.inverse(); - - // If there are a ton of values, we don't want to make a ginormous switch. - if (Span.getSetSize().ugt(8) || Span.isEmptySet()) - return nullptr; - - for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp) - Vals.push_back(ConstantInt::get(V->getContext(), Tmp)); + // If we already have a value for the switch, it has to match! 
+ if(!setValueOnce(ICI->getOperand(0))) + return false; + UsedICmps++; - return hasAdd ? RHSVal : I->getOperand(0); + Vals.push_back(C); + return ICI->getOperand(0); } - return nullptr; - } - // Otherwise, we can only handle an | or &, depending on isEQ. - if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And)) - return nullptr; + // If we have "x ult 3", for example, then we can add 0,1,2 to the set. + ConstantRange Span = ConstantRange::makeAllowedICmpRegion( + ICI->getPredicate(), C->getValue()); - unsigned NumValsBeforeLHS = Vals.size(); - unsigned UsedICmpsBeforeLHS = UsedICmps; - if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL, - isEQ, UsedICmps)) { - unsigned NumVals = Vals.size(); - unsigned UsedICmpsBeforeRHS = UsedICmps; - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL, - isEQ, UsedICmps)) { - if (LHS == RHS) - return LHS; - Vals.resize(NumVals); - UsedICmps = UsedICmpsBeforeRHS; + // Shift the range if the compare is fed by an add. This is the range + // compare idiom as emitted by instcombine. + Value *CandidateVal = I->getOperand(0); + if(match(I->getOperand(0), m_Add(m_Value(RHSVal), m_ConstantInt(RHSC)))) { + Span = Span.subtract(RHSC->getValue()); + CandidateVal = RHSVal; } - // The RHS of the or/and can't be folded in and we haven't used "Extra" yet, - // set it and return success. - if (Extra == nullptr || Extra == I->getOperand(1)) { - Extra = I->getOperand(1); - return LHS; + // If this is an and/!= check, then we are looking to build the set of + // value that *don't* pass the and chain. I.e. to turn "x ugt 2" into + // x != 0 && x != 1. + if (!isEQ) + Span = Span.inverse(); + + // If there are a ton of values, we don't want to make a ginormous switch. + if (Span.getSetSize().ugt(8) || Span.isEmptySet()) { + return false; } - Vals.resize(NumValsBeforeLHS); - UsedICmps = UsedICmpsBeforeLHS; - return nullptr; + // If we already have a value for the switch, it has to match! + if(!setValueOnce(CandidateVal)) + return false; + + // Add all values from the range to the set + for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp) + Vals.push_back(ConstantInt::get(I->getContext(), Tmp)); + + UsedICmps++; + return true; + } - // If the LHS can't be folded in, but Extra is available and RHS can, try to - // use LHS as Extra. - if (Extra == nullptr || Extra == I->getOperand(0)) { - Value *OldExtra = Extra; - Extra = I->getOperand(0); - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL, - isEQ, UsedICmps)) - return RHS; - assert(Vals.size() == NumValsBeforeLHS); - Extra = OldExtra; + /// Given a potentially 'or'd or 'and'd together collection of icmp + /// eq/ne/lt/gt instructions that compare a value against a constant, extract + /// the value being compared, and stick the list constants into the Vals + /// vector. + /// One "Extra" case is allowed to differ from the other. + void gather(Value *V) { + Instruction *I = dyn_cast(V); + bool isEQ = (I->getOpcode() == Instruction::Or); + + // Keep a stack (SmallVector for efficiency) for depth-first traversal + SmallVector DFT; + + // Initialize + DFT.push_back(V); + + while(!DFT.empty()) { + V = DFT.pop_back_val(); + + if (Instruction *I = dyn_cast(V)) { + // If it is a || (or && depending on isEQ), process the operands. + if (I->getOpcode() == (isEQ ? 
Instruction::Or : Instruction::And)) { + DFT.push_back(I->getOperand(1)); + DFT.push_back(I->getOperand(0)); + continue; + } + + // Try to match the current instruction + if (matchInstruction(I, isEQ)) + // Match succeed, continue the loop + continue; + } + + // One element of the sequence of || (or &&) could not be match as a + // comparison against the same value as the others. + // We allow only one "Extra" case to be checked before the switch + if (!Extra) { + Extra = V; + continue; + } + // Failed to parse a proper sequence, abort now + CompValue = nullptr; + break; + } } +}; - return nullptr; } static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) { @@ -469,7 +547,7 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) { if (Cond) RecursivelyDeleteTriviallyDeadInstructions(Cond); } -/// isValueEqualityComparison - Return true if the specified terminator checks +/// Return true if the specified terminator checks /// to see if a value is equal to constant integer value. Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) { Value *CV = nullptr; @@ -481,22 +559,23 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) { CV = SI->getCondition(); } else if (BranchInst *BI = dyn_cast(TI)) if (BI->isConditional() && BI->getCondition()->hasOneUse()) - if (ICmpInst *ICI = dyn_cast(BI->getCondition())) + if (ICmpInst *ICI = dyn_cast(BI->getCondition())) { if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL)) CV = ICI->getOperand(0); + } // Unwrap any lossless ptrtoint cast. - if (DL && CV) { + if (CV) { if (PtrToIntInst *PTII = dyn_cast(CV)) { Value *Ptr = PTII->getPointerOperand(); - if (PTII->getType() == DL->getIntPtrType(Ptr->getType())) + if (PTII->getType() == DL.getIntPtrType(Ptr->getType())) CV = Ptr; } } return CV; } -/// GetValueEqualityComparisonCases - Given a value comparison instruction, +/// Given a value comparison instruction, /// decode all of the 'cases' that it represents and return the 'default' block. BasicBlock *SimplifyCFGOpt:: GetValueEqualityComparisonCases(TerminatorInst *TI, @@ -520,15 +599,14 @@ GetValueEqualityComparisonCases(TerminatorInst *TI, } -/// EliminateBlockCases - Given a vector of bb/value pairs, remove any entries +/// Given a vector of bb/value pairs, remove any entries /// in the list that match the specified block. static void EliminateBlockCases(BasicBlock *BB, std::vector &Cases) { Cases.erase(std::remove(Cases.begin(), Cases.end(), BB), Cases.end()); } -/// ValuesOverlap - Return true if there are any keys in C1 that exist in C2 as -/// well. +/// Return true if there are any keys in C1 that exist in C2 as well. static bool ValuesOverlap(std::vector &C1, std::vector &C2) { @@ -562,12 +640,11 @@ ValuesOverlap(std::vector &C1, return false; } -/// SimplifyEqualityComparisonWithOnlyPredecessor - If TI is known to be a -/// terminator instruction and its block is known to only have a single -/// predecessor block, check to see if that predecessor is also a value -/// comparison with the same value, and if that comparison determines the -/// outcome of this comparison. If so, simplify TI. This does a very limited -/// form of jump threading. +/// If TI is known to be a terminator instruction and its block is known to +/// only have a single predecessor block, check to see if that predecessor is +/// also a value comparison with the same value, and if that comparison +/// determines the outcome of this comparison. If so, simplify TI. This does a +/// very limited form of jump threading. 
bool SimplifyCFGOpt:: SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, BasicBlock *Pred, @@ -631,13 +708,12 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, // Collect branch weights into a vector. SmallVector Weights; - MDNode* MD = SI->getMetadata(LLVMContext::MD_prof); + MDNode *MD = SI->getMetadata(LLVMContext::MD_prof); bool HasWeight = MD && (MD->getNumOperands() == 2 + SI->getNumCases()); if (HasWeight) for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e; ++MD_i) { - ConstantInt* CI = dyn_cast(MD->getOperand(MD_i)); - assert(CI); + ConstantInt *CI = mdconst::extract(MD->getOperand(MD_i)); Weights.push_back(CI->getValue().getZExtValue()); } for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) { @@ -704,7 +780,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, } namespace { - /// ConstantIntOrdering - This class implements a stable ordering of constant + /// This class implements a stable ordering of constant /// integers that does not depend on their address. This is important for /// applications that sort ConstantInt's to ensure uniqueness. struct ConstantIntOrdering { @@ -726,7 +802,7 @@ static int ConstantIntSortPredicate(ConstantInt *const *P1, } static inline bool HasBranchWeights(const Instruction* I) { - MDNode* ProfMD = I->getMetadata(LLVMContext::MD_prof); + MDNode *ProfMD = I->getMetadata(LLVMContext::MD_prof); if (ProfMD && ProfMD->getOperand(0)) if (MDString* MDS = dyn_cast(ProfMD->getOperand(0))) return MDS->getString().equals("branch_weights"); @@ -739,10 +815,10 @@ static inline bool HasBranchWeights(const Instruction* I) { /// metadata. static void GetBranchWeights(TerminatorInst *TI, SmallVectorImpl &Weights) { - MDNode* MD = TI->getMetadata(LLVMContext::MD_prof); + MDNode *MD = TI->getMetadata(LLVMContext::MD_prof); assert(MD); for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) { - ConstantInt *CI = cast(MD->getOperand(i)); + ConstantInt *CI = mdconst::extract(MD->getOperand(i)); Weights.push_back(CI->getValue().getZExtValue()); } @@ -767,8 +843,8 @@ static void FitWeights(MutableArrayRef Weights) { } } -/// FoldValueComparisonIntoPredecessors - The specified terminator is a value -/// equality comparison instruction (either a switch or a branch on "X == c"). +/// The specified terminator is a value equality comparison instruction +/// (either a switch or a branch on "X == c"). /// See if any of the predecessors of the terminator block are value comparisons /// on the same value. If so, and if safe to do so, fold them together. bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, @@ -925,14 +1001,13 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, // Okay, at this point, we know which new successor Pred will get. Make // sure we update the number of entries in the PHI nodes for these // successors. - for (unsigned i = 0, e = NewSuccessors.size(); i != e; ++i) - AddPredecessorToBlock(NewSuccessors[i], Pred, BB); + for (BasicBlock *NewSuccessor : NewSuccessors) + AddPredecessorToBlock(NewSuccessor, Pred, BB); Builder.SetInsertPoint(PTI); // Convert pointer to int before we switch. 
if (CV->getType()->isPointerTy()) { - assert(DL && "Cannot switch on pointer without DataLayout"); - CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()), + CV = Builder.CreatePtrToInt(CV, DL.getIntPtrType(CV->getType()), "magicptr"); } @@ -940,8 +1015,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, SwitchInst *NewSI = Builder.CreateSwitch(CV, PredDefault, PredCases.size()); NewSI->setDebugLoc(PTI->getDebugLoc()); - for (unsigned i = 0, e = PredCases.size(); i != e; ++i) - NewSI->addCase(PredCases[i].Value, PredCases[i].Dest); + for (ValueEqualityComparisonCase &V : PredCases) + NewSI->addCase(V.Value, V.Dest); if (PredHasWeights || SuccHasWeights) { // Halve the weights if any of them cannot fit in an uint32_t @@ -978,10 +1053,9 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, return Changed; } -// isSafeToHoistInvoke - If we would need to insert a select that uses the -// value of this invoke (comments in HoistThenElseCodeToIf explain why we -// would need to do this), we can't hoist the invoke, as there is nowhere -// to put the select in this case. +// If we would need to insert a select that uses the value of this invoke +// (comments in HoistThenElseCodeToIf explain why we would need to do this), we +// can't hoist the invoke, as there is nowhere to put the select in this case. static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2, Instruction *I1, Instruction *I2) { for (succ_iterator SI = succ_begin(BB1), E = succ_end(BB1); SI != E; ++SI) { @@ -998,10 +1072,13 @@ static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2, return true; } -/// HoistThenElseCodeToIf - Given a conditional branch that goes to BB1 and -/// BB2, hoist any common code in the two blocks up into the branch block. The -/// caller of this function guarantees that BI's block dominates BB1 and BB2. -static bool HoistThenElseCodeToIf(BranchInst *BI, const DataLayout *DL) { +static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I); + +/// Given a conditional branch that goes to BB1 and BB2, hoist any common code +/// in the two blocks up into the branch block. The caller of this function +/// guarantees that BI's block dominates BB1 and BB2. +static bool HoistThenElseCodeToIf(BranchInst *BI, + const TargetTransformInfo &TTI) { // This does very trivial matching, with limited scanning, to find identical // instructions in the two blocks. In particular, we don't want to get into // O(M*N) situations here where M and N are the sizes of BB1 and BB2. As @@ -1013,15 +1090,15 @@ static bool HoistThenElseCodeToIf(BranchInst *BI, const DataLayout *DL) { BasicBlock::iterator BB1_Itr = BB1->begin(); BasicBlock::iterator BB2_Itr = BB2->begin(); - Instruction *I1 = BB1_Itr++, *I2 = BB2_Itr++; + Instruction *I1 = &*BB1_Itr++, *I2 = &*BB2_Itr++; // Skip debug info if it is not identical. 
DbgInfoIntrinsic *DBI1 = dyn_cast(I1); DbgInfoIntrinsic *DBI2 = dyn_cast(I2); if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) { while (isa(I1)) - I1 = BB1_Itr++; + I1 = &*BB1_Itr++; while (isa(I2)) - I2 = BB2_Itr++; + I2 = &*BB2_Itr++; } if (isa(I1) || !I1->isIdenticalToWhenDefined(I2) || (isa(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))) @@ -1036,33 +1113,36 @@ static bool HoistThenElseCodeToIf(BranchInst *BI, const DataLayout *DL) { if (isa(I1)) goto HoistTerminator; + if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2)) + return Changed; + // For a normal instruction, we just move one to right before the branch, // then replace all uses of the other with the first. Finally, we remove // the now redundant second instruction. - BIParent->getInstList().splice(BI, BB1->getInstList(), I1); + BIParent->getInstList().splice(BI->getIterator(), BB1->getInstList(), I1); if (!I2->use_empty()) I2->replaceAllUsesWith(I1); I1->intersectOptionalDataWith(I2); unsigned KnownIDs[] = { - LLVMContext::MD_tbaa, - LLVMContext::MD_range, - LLVMContext::MD_fpmath, - LLVMContext::MD_invariant_load - }; + LLVMContext::MD_tbaa, LLVMContext::MD_range, + LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, + LLVMContext::MD_nonnull, LLVMContext::MD_invariant_group, + LLVMContext::MD_align, LLVMContext::MD_dereferenceable, + LLVMContext::MD_dereferenceable_or_null}; combineMetadata(I1, I2, KnownIDs); I2->eraseFromParent(); Changed = true; - I1 = BB1_Itr++; - I2 = BB2_Itr++; + I1 = &*BB1_Itr++; + I2 = &*BB2_Itr++; // Skip debug info if it is not identical. DbgInfoIntrinsic *DBI1 = dyn_cast(I1); DbgInfoIntrinsic *DBI2 = dyn_cast(I2); if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) { while (isa(I1)) - I1 = BB1_Itr++; + I1 = &*BB1_Itr++; while (isa(I2)) - I2 = BB2_Itr++; + I2 = &*BB2_Itr++; } } while (I1->isIdenticalToWhenDefined(I2)); @@ -1082,16 +1162,22 @@ HoistTerminator: if (BB1V == BB2V) continue; - if (isa(BB1V) && !isSafeToSpeculativelyExecute(BB1V, DL)) + // Check for passingValueIsAlwaysUndefined here because we would rather + // eliminate undefined control flow then converting it to a select. + if (passingValueIsAlwaysUndefined(BB1V, PN) || + passingValueIsAlwaysUndefined(BB2V, PN)) + return Changed; + + if (isa(BB1V) && !isSafeToSpeculativelyExecute(BB1V)) return Changed; - if (isa(BB2V) && !isSafeToSpeculativelyExecute(BB2V, DL)) + if (isa(BB2V) && !isSafeToSpeculativelyExecute(BB2V)) return Changed; } } // Okay, it is safe to hoist the terminator. Instruction *NT = I1->clone(); - BIParent->getInstList().insert(BI, NT); + BIParent->getInstList().insert(BI->getIterator(), NT); if (!NT->getType()->isVoidTy()) { I1->replaceAllUsesWith(NT); I2->replaceAllUsesWith(NT); @@ -1135,7 +1221,7 @@ HoistTerminator: return true; } -/// SinkThenElseCodeToEnd - Given an unconditional branch that goes to BBEnd, +/// Given an unconditional branch that goes to BBEnd, /// check whether BBEnd has only two predecessors and the other predecessor /// ends with an unconditional branch. If it is true, sink any common code /// in the two predecessors to BBEnd. @@ -1159,14 +1245,13 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { return false; // Gather the PHI nodes in BBEnd. 
- std::map > MapValueFromBB1ToBB2; + SmallDenseMap, PHINode *> JointValueMap; Instruction *FirstNonPhiInBBEnd = nullptr; - for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end(); - I != E; ++I) { + for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end(); I != E; ++I) { if (PHINode *PN = dyn_cast(I)) { Value *BB1V = PN->getIncomingValueForBlock(BB1); Value *BB2V = PN->getIncomingValueForBlock(BB2); - MapValueFromBB1ToBB2[BB1V] = std::make_pair(BB2V, PN); + JointValueMap[std::make_pair(BB1V, BB2V)] = PN; } else { FirstNonPhiInBBEnd = &*I; break; @@ -1175,13 +1260,13 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { if (!FirstNonPhiInBBEnd) return false; - // This does very trivial matching, with limited scanning, to find identical // instructions in the two blocks. We scan backward for obviously identical // instructions in an identical order. BasicBlock::InstListType::reverse_iterator RI1 = BB1->getInstList().rbegin(), - RE1 = BB1->getInstList().rend(), RI2 = BB2->getInstList().rbegin(), - RE2 = BB2->getInstList().rend(); + RE1 = BB1->getInstList().rend(), + RI2 = BB2->getInstList().rbegin(), + RE2 = BB2->getInstList().rend(); // Skip debug info. while (RI1 != RE1 && isa(&*RI1)) ++RI1; if (RI1 == RE1) @@ -1204,21 +1289,22 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { return Changed; Instruction *I1 = &*RI1, *I2 = &*RI2; + auto InstPair = std::make_pair(I1, I2); // I1 and I2 should have a single use in the same PHI node, and they // perform the same operation. // Cannot move control-flow-involving, volatile loads, vaarg, etc. if (isa(I1) || isa(I2) || isa(I1) || isa(I2) || - isa(I1) || isa(I2) || + I1->isEHPad() || I2->isEHPad() || isa(I1) || isa(I2) || I1->mayHaveSideEffects() || I2->mayHaveSideEffects() || I1->mayReadOrWriteMemory() || I2->mayReadOrWriteMemory() || !I1->hasOneUse() || !I2->hasOneUse() || - MapValueFromBB1ToBB2.find(I1) == MapValueFromBB1ToBB2.end() || - MapValueFromBB1ToBB2[I1].first != I2) + !JointValueMap.count(InstPair)) return Changed; // Check whether we should swap the operands of ICmpInst. + // TODO: Add support of communativity. ICmpInst *ICmp1 = dyn_cast(I1), *ICmp2 = dyn_cast(I2); bool SwapOpnds = false; if (ICmp1 && ICmp2 && @@ -1239,16 +1325,13 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { // with a PHI node after sinking. We only handle the case where there is // a single pair of different operands. Value *DifferentOp1 = nullptr, *DifferentOp2 = nullptr; - unsigned Op1Idx = 0; + unsigned Op1Idx = ~0U; for (unsigned I = 0, E = I1->getNumOperands(); I != E; ++I) { if (I1->getOperand(I) == I2->getOperand(I)) continue; - // Early exit if we have more-than one pair of different operands or - // the different operand is already in MapValueFromBB1ToBB2. - // Early exit if we need a PHI node to replace a constant. - if (DifferentOp1 || - MapValueFromBB1ToBB2.find(I1->getOperand(I)) != - MapValueFromBB1ToBB2.end() || + // Early exit if we have more-than one pair of different operands or if + // we need a PHI node to replace a constant. + if (Op1Idx != ~0U || isa(I1->getOperand(I)) || isa(I2->getOperand(I))) { // If we can't sink the instructions, undo the swapping. @@ -1261,29 +1344,33 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { DifferentOp2 = I2->getOperand(I); } - // We insert the pair of different operands to MapValueFromBB1ToBB2 and - // remove (I1, I2) from MapValueFromBB1ToBB2. 
- if (DifferentOp1) { - PHINode *NewPN = PHINode::Create(DifferentOp1->getType(), 2, - DifferentOp1->getName() + ".sink", - BBEnd->begin()); - MapValueFromBB1ToBB2[DifferentOp1] = std::make_pair(DifferentOp2, NewPN); + DEBUG(dbgs() << "SINK common instructions " << *I1 << "\n"); + DEBUG(dbgs() << " " << *I2 << "\n"); + + // We insert the pair of different operands to JointValueMap and + // remove (I1, I2) from JointValueMap. + if (Op1Idx != ~0U) { + auto &NewPN = JointValueMap[std::make_pair(DifferentOp1, DifferentOp2)]; + if (!NewPN) { + NewPN = + PHINode::Create(DifferentOp1->getType(), 2, + DifferentOp1->getName() + ".sink", &BBEnd->front()); + NewPN->addIncoming(DifferentOp1, BB1); + NewPN->addIncoming(DifferentOp2, BB2); + DEBUG(dbgs() << "Create PHI node " << *NewPN << "\n";); + } // I1 should use NewPN instead of DifferentOp1. I1->setOperand(Op1Idx, NewPN); - NewPN->addIncoming(DifferentOp1, BB1); - NewPN->addIncoming(DifferentOp2, BB2); - DEBUG(dbgs() << "Create PHI node " << *NewPN << "\n";); } - PHINode *OldPN = MapValueFromBB1ToBB2[I1].second; - MapValueFromBB1ToBB2.erase(I1); + PHINode *OldPN = JointValueMap[InstPair]; + JointValueMap.erase(InstPair); - DEBUG(dbgs() << "SINK common instructions " << *I1 << "\n";); - DEBUG(dbgs() << " " << *I2 << "\n";); // We need to update RE1 and RE2 if we are going to sink the first // instruction in the basic block down. bool UpdateRE1 = (I1 == BB1->begin()), UpdateRE2 = (I2 == BB2->begin()); // Sink the instruction. - BBEnd->getInstList().splice(FirstNonPhiInBBEnd, BB1->getInstList(), I1); + BBEnd->getInstList().splice(FirstNonPhiInBBEnd->getIterator(), + BB1->getInstList(), I1); if (!OldPN->use_empty()) OldPN->replaceAllUsesWith(I1); OldPN->eraseFromParent(); @@ -1291,13 +1378,15 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { if (!I2->use_empty()) I2->replaceAllUsesWith(I1); I1->intersectOptionalDataWith(I2); + // TODO: Use combineMetadata here to preserve what metadata we can + // (analogous to the hoisting case above). I2->eraseFromParent(); if (UpdateRE1) RE1 = BB1->getInstList().rend(); if (UpdateRE2) RE2 = BB2->getInstList().rend(); - FirstNonPhiInBBEnd = I1; + FirstNonPhiInBBEnd = &*I1; NumSinkCommons++; Changed = true; } @@ -1402,7 +1491,7 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB, /// /// \returns true if the conditional block is removed. static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, - const DataLayout *DL) { + const TargetTransformInfo &TTI) { // Be conservative for now. FP select instruction can often be expensive. Value *BrCond = BI->getCondition(); if (isa(BrCond)) @@ -1433,25 +1522,25 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, for (BasicBlock::iterator BBI = ThenBB->begin(), BBE = std::prev(ThenBB->end()); BBI != BBE; ++BBI) { - Instruction *I = BBI; + Instruction *I = &*BBI; // Skip debug info. if (isa(I)) continue; - // Only speculatively execution a single instruction (not counting the + // Only speculatively execute a single instruction (not counting the // terminator) for now. ++SpeculationCost; if (SpeculationCost > 1) return false; // Don't hoist the instruction if it's unsafe or expensive. 
- if (!isSafeToSpeculativelyExecute(I, DL) && - !(HoistCondStores && - (SpeculatedStoreValue = isSafeToSpeculateStore(I, BB, ThenBB, - EndBB)))) + if (!isSafeToSpeculativelyExecute(I) && + !(HoistCondStores && (SpeculatedStoreValue = isSafeToSpeculateStore( + I, BB, ThenBB, EndBB)))) return false; if (!SpeculatedStoreValue && - ComputeSpeculationCost(I, DL) > PHINodeFoldingThreshold) + ComputeSpeculationCost(I, TTI) > + PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic) return false; // Store the store speculation candidate. @@ -1496,18 +1585,25 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, if (ThenV == OrigV) continue; + // Don't convert to selects if we could remove undefined behavior instead. + if (passingValueIsAlwaysUndefined(OrigV, PN) || + passingValueIsAlwaysUndefined(ThenV, PN)) + return false; + HaveRewritablePHIs = true; ConstantExpr *OrigCE = dyn_cast(OrigV); ConstantExpr *ThenCE = dyn_cast(ThenV); if (!OrigCE && !ThenCE) continue; // Known safe and cheap. - if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE, DL)) || - (OrigCE && !isSafeToSpeculativelyExecute(OrigCE, DL))) + if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE)) || + (OrigCE && !isSafeToSpeculativelyExecute(OrigCE))) return false; - unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE, DL) : 0; - unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE, DL) : 0; - if (OrigCost + ThenCost > 2 * PHINodeFoldingThreshold) + unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE, TTI) : 0; + unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE, TTI) : 0; + unsigned MaxCost = 2 * PHINodeFoldingThreshold * + TargetTransformInfo::TCC_Basic; + if (OrigCost + ThenCost > MaxCost) return false; // Account for the cost of an unfolded ConstantExpr which could end up @@ -1539,9 +1635,14 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, SpeculatedStore->setOperand(0, S); } + // Metadata can be dependent on the condition we are hoisting above. + // Conservatively strip all metadata on the instruction. + for (auto &I: *ThenBB) + I.dropUnknownNonDebugMetadata(); + // Hoist the instructions. - BB->getInstList().splice(BI, ThenBB->getInstList(), ThenBB->begin(), - std::prev(ThenBB->end())); + BB->getInstList().splice(BI->getIterator(), ThenBB->getInstList(), + ThenBB->begin(), std::prev(ThenBB->end())); // Insert selects and rewrite the PHI operands. IRBuilder Builder(BI); @@ -1585,8 +1686,7 @@ static bool HasNoDuplicateCall(const BasicBlock *BB) { return false; } -/// BlockIsSimpleEnoughToThreadThrough - Return true if we can thread a branch -/// across this block. +/// Return true if we can thread a branch across this block. static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { BranchInst *BI = cast(BB->getTerminator()); unsigned Size = 0; @@ -1610,11 +1710,10 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { return true; } -/// FoldCondBranchOnPHI - If we have a conditional branch on a PHI node value -/// that is defined in the same block as the branch and if any PHI entries are -/// constants, thread edges corresponding to that entry to be branches to their -/// ultimate destination. -static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) { +/// If we have a conditional branch on a PHI node value that is defined in the +/// same block as the branch and if any PHI entries are constants, thread edges +/// corresponding to that entry to be branches to their ultimate destination. 
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout &DL) { BasicBlock *BB = BI->getParent(); PHINode *PN = dyn_cast(BI->getCondition()); // NOTE: we currently cannot transform this case if the PHI node is used @@ -1684,13 +1783,13 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) { // Check for trivial simplification. if (Value *V = SimplifyInstruction(N, DL)) { - TranslateMap[BBI] = V; + TranslateMap[&*BBI] = V; delete N; // Instruction folded away, don't need actual inst } else { // Insert the new instruction into its new home. EdgeBB->getInstList().insert(InsertPt, N); if (!BBI->use_empty()) - TranslateMap[BBI] = N; + TranslateMap[&*BBI] = N; } } @@ -1710,9 +1809,10 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) { return false; } -/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry -/// PHI node, see if we can eliminate it. -static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { +/// Given a BB that starts with the specified two-entry PHI node, +/// see if we can eliminate it. +static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI, + const DataLayout &DL) { // Ok, this is a two entry PHI node. Check to see if this is a simple "if // statement", which has a very simple dominance structure. Basically, we // are trying to find the condition that is being branched on, which @@ -1743,6 +1843,8 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { SmallPtrSet AggressiveInsts; unsigned MaxCostVal0 = PHINodeFoldingThreshold, MaxCostVal1 = PHINodeFoldingThreshold; + MaxCostVal0 *= TargetTransformInfo::TCC_Basic; + MaxCostVal1 *= TargetTransformInfo::TCC_Basic; for (BasicBlock::iterator II = BB->begin(); isa(II);) { PHINode *PN = cast(II++); @@ -1753,9 +1855,9 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { } if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts, - MaxCostVal0, DL) || + MaxCostVal0, TTI) || !DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts, - MaxCostVal1, DL)) + MaxCostVal1, TTI)) return false; } @@ -1784,7 +1886,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { } else { DomBlock = *pred_begin(IfBlock1); for (BasicBlock::iterator I = IfBlock1->begin();!isa(I);++I) - if (!AggressiveInsts.count(I) && !isa(I)) { + if (!AggressiveInsts.count(&*I) && !isa(I)) { // This is not an aggressive instruction that we can promote. // Because of this, we won't be able to get rid of the control // flow, so the xform is not worth it. @@ -1797,7 +1899,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { } else { DomBlock = *pred_begin(IfBlock2); for (BasicBlock::iterator I = IfBlock2->begin();!isa(I);++I) - if (!AggressiveInsts.count(I) && !isa(I)) { + if (!AggressiveInsts.count(&*I) && !isa(I)) { // This is not an aggressive instruction that we can promote. // Because of this, we won't be able to get rid of the control // flow, so the xform is not worth it. @@ -1816,13 +1918,13 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { // Move all 'aggressive' instructions, which are defined in the // conditional parts of the if's up to the dominating block. 
if (IfBlock1) - DomBlock->getInstList().splice(InsertPt, + DomBlock->getInstList().splice(InsertPt->getIterator(), IfBlock1->getInstList(), IfBlock1->begin(), - IfBlock1->getTerminator()); + IfBlock1->getTerminator()->getIterator()); if (IfBlock2) - DomBlock->getInstList().splice(InsertPt, + DomBlock->getInstList().splice(InsertPt->getIterator(), IfBlock2->getInstList(), IfBlock2->begin(), - IfBlock2->getTerminator()); + IfBlock2->getTerminator()->getIterator()); while (PHINode *PN = dyn_cast(BB->begin())) { // Change the PHI node into a select instruction. @@ -1846,8 +1948,8 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { return true; } -/// SimplifyCondBranchToTwoReturns - If we found a conditional branch that goes -/// to two returning blocks, try to merge them together into one return, +/// If we found a conditional branch that goes to two returning blocks, +/// try to merge them together into one return, /// introducing a select if the return values disagree. static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, IRBuilder<> &Builder) { @@ -1934,27 +2036,27 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, return true; } -/// ExtractBranchMetadata - Given a conditional BranchInstruction, retrieve the -/// probabilities of the branch taking each edge. Fills in the two APInt -/// parameters and return true, or returns false if no or invalid metadata was -/// found. +/// Given a conditional BranchInstruction, retrieve the probabilities of the +/// branch taking each edge. Fills in the two APInt parameters and returns true, +/// or returns false if no or invalid metadata was found. static bool ExtractBranchMetadata(BranchInst *BI, uint64_t &ProbTrue, uint64_t &ProbFalse) { assert(BI->isConditional() && "Looking for probabilities on unconditional branch?"); MDNode *ProfileData = BI->getMetadata(LLVMContext::MD_prof); if (!ProfileData || ProfileData->getNumOperands() != 3) return false; - ConstantInt *CITrue = dyn_cast(ProfileData->getOperand(1)); - ConstantInt *CIFalse = dyn_cast(ProfileData->getOperand(2)); + ConstantInt *CITrue = + mdconst::dyn_extract(ProfileData->getOperand(1)); + ConstantInt *CIFalse = + mdconst::dyn_extract(ProfileData->getOperand(2)); if (!CITrue || !CIFalse) return false; ProbTrue = CITrue->getValue().getZExtValue(); ProbFalse = CIFalse->getValue().getZExtValue(); return true; } -/// checkCSEInPredecessor - Return true if the given instruction is available +/// Return true if the given instruction is available /// in its predecessor block. If yes, the instruction will be removed. -/// static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) { if (!isa(Inst) && !isa(Inst)) return false; @@ -1970,10 +2072,10 @@ static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) { return false; } -/// FoldBranchToCommonDest - If this basic block is simple enough, and if a -/// predecessor branches to us and one of our successors, fold the block into -/// the predecessor and use logical operations to pick the right destination. -bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { +/// If this basic block is simple enough, and if a predecessor branches to us +/// and one of our successors, fold the block into the predecessor and use +/// logical operations to pick the right destination. 
+bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) { BasicBlock *BB = BI->getParent(); Instruction *Cond = nullptr; @@ -1991,7 +2093,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { BI->getSuccessor(0) == PBI->getSuccessor(1))) { for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) { - Instruction *Curr = I++; + Instruction *Curr = &*I++; if (isa(Curr)) { Cond = Curr; break; @@ -2010,35 +2112,8 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { Cond->getParent() != BB || !Cond->hasOneUse()) return false; - // Only allow this if the condition is a simple instruction that can be - // executed unconditionally. It must be in the same block as the branch, and - // must be at the front of the block. - BasicBlock::iterator FrontIt = BB->front(); - - // Ignore dbg intrinsics. - while (isa(FrontIt)) ++FrontIt; - - // Allow a single instruction to be hoisted in addition to the compare - // that feeds the branch. We later ensure that any values that _it_ uses - // were also live in the predecessor, so that we don't unnecessarily create - // register pressure or inhibit out-of-order execution. - Instruction *BonusInst = nullptr; - if (&*FrontIt != Cond && - FrontIt->hasOneUse() && FrontIt->user_back() == Cond && - isSafeToSpeculativelyExecute(FrontIt, DL)) { - BonusInst = &*FrontIt; - ++FrontIt; - - // Ignore dbg intrinsics. - while (isa(FrontIt)) ++FrontIt; - } - - // Only a single bonus inst is allowed. - if (&*FrontIt != Cond) - return false; - // Make sure the instruction after the condition is the cond branch. - BasicBlock::iterator CondIt = Cond; ++CondIt; + BasicBlock::iterator CondIt = ++Cond->getIterator(); // Ignore dbg intrinsics. while (isa(CondIt)) ++CondIt; @@ -2046,6 +2121,31 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { if (&*CondIt != BI) return false; + // Only allow this transformation if computing the condition doesn't involve + // too many instructions and these involved instructions can be executed + // unconditionally. We denote all involved instructions except the condition + // as "bonus instructions", and only allow this transformation when the + // number of the bonus instructions does not exceed a certain threshold. + unsigned NumBonusInsts = 0; + for (auto I = BB->begin(); Cond != I; ++I) { + // Ignore dbg intrinsics. + if (isa(I)) + continue; + if (!I->hasOneUse() || !isSafeToSpeculativelyExecute(&*I)) + return false; + // I has only one use and can be executed unconditionally. + Instruction *User = dyn_cast(I->user_back()); + if (User == nullptr || User->getParent() != BB) + return false; + // I is used in the same BB. Since BI uses Cond and doesn't have more slots + // to use any other instruction, User must be an instruction between next(I) + // and Cond. + ++NumBonusInsts; + // Early exits once we reach the limit. + if (NumBonusInsts > BonusInstThreshold) + return false; + } + // Cond is known to be a compare or binary operator. Check to make sure that // neither operand is a potentially-trapping constant expression. if (ConstantExpr *CE = dyn_cast(Cond->getOperand(0))) @@ -2115,30 +2215,41 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { PBI->swapSuccessors(); } - // If we have a bonus inst, clone it into the predecessor block. - Instruction *NewBonus = nullptr; - if (BonusInst) { - NewBonus = BonusInst->clone(); + // If we have bonus instructions, clone them into the predecessor block. 
+ // Note that there may be multiple predecessor blocks, so we cannot move + // bonus instructions to a predecessor block. + ValueToValueMapTy VMap; // maps original values to cloned values + // We already make sure Cond is the last instruction before BI. Therefore, + // all instructions before Cond other than DbgInfoIntrinsic are bonus + // instructions. + for (auto BonusInst = BB->begin(); Cond != BonusInst; ++BonusInst) { + if (isa(BonusInst)) + continue; + Instruction *NewBonusInst = BonusInst->clone(); + RemapInstruction(NewBonusInst, VMap, + RF_NoModuleLevelChanges | RF_IgnoreMissingEntries); + VMap[&*BonusInst] = NewBonusInst; // If we moved a load, we cannot any longer claim any knowledge about // its potential value. The previous information might have been valid // only given the branch precondition. // For an analogous reason, we must also drop all the metadata whose // semantics we don't understand. - NewBonus->dropUnknownMetadata(LLVMContext::MD_dbg); + NewBonusInst->dropUnknownNonDebugMetadata(); - PredBlock->getInstList().insert(PBI, NewBonus); - NewBonus->takeName(BonusInst); - BonusInst->setName(BonusInst->getName()+".old"); + PredBlock->getInstList().insert(PBI->getIterator(), NewBonusInst); + NewBonusInst->takeName(&*BonusInst); + BonusInst->setName(BonusInst->getName() + ".old"); } // Clone Cond into the predecessor basic block, and or/and the // two conditions together. Instruction *New = Cond->clone(); - if (BonusInst) New->replaceUsesOfWith(BonusInst, NewBonus); - PredBlock->getInstList().insert(PBI, New); + RemapInstruction(New, VMap, + RF_NoModuleLevelChanges | RF_IgnoreMissingEntries); + PredBlock->getInstList().insert(PBI->getIterator(), New); New->takeName(Cond); - Cond->setName(New->getName()+".old"); + Cond->setName(New->getName() + ".old"); if (BI->isConditional()) { Instruction *NewCond = @@ -2257,11 +2368,297 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) { return false; } -/// SimplifyCondBranchToCondBranch - If we have a conditional branch as a -/// predecessor of another block, this function tries to simplify it. We know +// If there is only one store in BB1 and BB2, return it, otherwise return +// nullptr. +static StoreInst *findUniqueStoreInBlocks(BasicBlock *BB1, BasicBlock *BB2) { + StoreInst *S = nullptr; + for (auto *BB : {BB1, BB2}) { + if (!BB) + continue; + for (auto &I : *BB) + if (auto *SI = dyn_cast(&I)) { + if (S) + // Multiple stores seen. + return nullptr; + else + S = SI; + } + } + return S; +} + +static Value *ensureValueAvailableInSuccessor(Value *V, BasicBlock *BB, + Value *AlternativeV = nullptr) { + // PHI is going to be a PHI node that allows the value V that is defined in + // BB to be referenced in BB's only successor. + // + // If AlternativeV is nullptr, the only value we care about in PHI is V. It + // doesn't matter to us what the other operand is (it'll never get used). We + // could just create a new PHI with an undef incoming value, but that could + // increase register pressure if EarlyCSE/InstCombine can't fold it with some + // other PHI. So here we directly look for some PHI in BB's successor with V + // as an incoming operand. If we find one, we use it, else we create a new + // one. + // + // If AlternativeV is not nullptr, we care about both incoming values in PHI. + // PHI must be exactly: phi [ %BB, %V ], [ %OtherBB, %AlternativeV] + // where OtherBB is the single other predecessor of BB's only successor. 
+ PHINode *PHI = nullptr; + BasicBlock *Succ = BB->getSingleSuccessor(); + + for (auto I = Succ->begin(); isa(I); ++I) + if (cast(I)->getIncomingValueForBlock(BB) == V) { + PHI = cast(I); + if (!AlternativeV) + break; + + assert(std::distance(pred_begin(Succ), pred_end(Succ)) == 2); + auto PredI = pred_begin(Succ); + BasicBlock *OtherPredBB = *PredI == BB ? *++PredI : *PredI; + if (PHI->getIncomingValueForBlock(OtherPredBB) == AlternativeV) + break; + PHI = nullptr; + } + if (PHI) + return PHI; + + // If V is not an instruction defined in BB, just return it. + if (!AlternativeV && + (!isa(V) || cast(V)->getParent() != BB)) + return V; + + PHI = PHINode::Create(V->getType(), 2, "simplifycfg.merge", &Succ->front()); + PHI->addIncoming(V, BB); + for (BasicBlock *PredBB : predecessors(Succ)) + if (PredBB != BB) + PHI->addIncoming(AlternativeV ? AlternativeV : UndefValue::get(V->getType()), + PredBB); + return PHI; +} + +static bool mergeConditionalStoreToAddress(BasicBlock *PTB, BasicBlock *PFB, + BasicBlock *QTB, BasicBlock *QFB, + BasicBlock *PostBB, Value *Address, + bool InvertPCond, bool InvertQCond) { + auto IsaBitcastOfPointerType = [](const Instruction &I) { + return Operator::getOpcode(&I) == Instruction::BitCast && + I.getType()->isPointerTy(); + }; + + // If we're not in aggressive mode, we only optimize if we have some + // confidence that by optimizing we'll allow P and/or Q to be if-converted. + auto IsWorthwhile = [&](BasicBlock *BB) { + if (!BB) + return true; + // Heuristic: if the block can be if-converted/phi-folded and the + // instructions inside are all cheap (arithmetic/GEPs), it's worthwhile to + // thread this store. + unsigned N = 0; + for (auto &I : *BB) { + // Cheap instructions viable for folding. + if (isa(I) || isa(I) || + isa(I)) + ++N; + // Free instructions. + else if (isa(I) || isa(I) || + IsaBitcastOfPointerType(I)) + continue; + else + return false; + } + return N <= PHINodeFoldingThreshold; + }; + + if (!MergeCondStoresAggressively && (!IsWorthwhile(PTB) || + !IsWorthwhile(PFB) || + !IsWorthwhile(QTB) || + !IsWorthwhile(QFB))) + return false; + + // For every pointer, there must be exactly two stores, one coming from + // PTB or PFB, and the other from QTB or QFB. We don't support more than one + // store (to any address) in PTB,PFB or QTB,QFB. + // FIXME: We could relax this restriction with a bit more work and performance + // testing. + StoreInst *PStore = findUniqueStoreInBlocks(PTB, PFB); + StoreInst *QStore = findUniqueStoreInBlocks(QTB, QFB); + if (!PStore || !QStore) + return false; + + // Now check the stores are compatible. + if (!QStore->isUnordered() || !PStore->isUnordered()) + return false; + + // Check that sinking the store won't cause program behavior changes. Sinking + // the store out of the Q blocks won't change any behavior as we're sinking + // from a block to its unconditional successor. But we're moving a store from + // the P blocks down through the middle block (QBI) and past both QFB and QTB. + // So we need to check that there are no aliasing loads or stores in + // QBI, QTB and QFB. We also need to check there are no conflicting memory + // operations between PStore and the end of its parent block. + // + // The ideal way to do this is to query AliasAnalysis, but we don't + // preserve AA currently so that is dangerous. Be super safe and just + // check there are no other memory operations at all. 
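// A minimal sketch of the IsWorthwhile heuristic above, assuming a coarse
// three-way classification instead of real instruction kinds: "Cheap" stands
// for binary ops / GEPs / icmps, "Free" for debug intrinsics, unconditional
// branches and pointer bitcasts, and anything else disqualifies the block from
// being considered if-convertible.
#include <vector>

enum class InstCost { Cheap, Free, Other };

static bool blockIsWorthwhile(const std::vector<InstCost> &Block,
                              unsigned PHINodeFoldingThreshold) {
  unsigned N = 0;
  for (InstCost C : Block) {
    if (C == InstCost::Cheap)
      ++N;                        // counts against the folding threshold
    else if (C == InstCost::Free)
      continue;                   // ignored entirely
    else
      return false;               // blocks the transformation
  }
  return N <= PHINodeFoldingThreshold;
}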
+ for (auto &I : *QFB->getSinglePredecessor()) + if (I.mayReadOrWriteMemory()) + return false; + for (auto &I : *QFB) + if (&I != QStore && I.mayReadOrWriteMemory()) + return false; + if (QTB) + for (auto &I : *QTB) + if (&I != QStore && I.mayReadOrWriteMemory()) + return false; + for (auto I = BasicBlock::iterator(PStore), E = PStore->getParent()->end(); + I != E; ++I) + if (&*I != PStore && I->mayReadOrWriteMemory()) + return false; + + // OK, we're going to sink the stores to PostBB. The store has to be + // conditional though, so first create the predicate. + Value *PCond = cast(PFB->getSinglePredecessor()->getTerminator()) + ->getCondition(); + Value *QCond = cast(QFB->getSinglePredecessor()->getTerminator()) + ->getCondition(); + + Value *PPHI = ensureValueAvailableInSuccessor(PStore->getValueOperand(), + PStore->getParent()); + Value *QPHI = ensureValueAvailableInSuccessor(QStore->getValueOperand(), + QStore->getParent(), PPHI); + + IRBuilder<> QB(&*PostBB->getFirstInsertionPt()); + + Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond); + Value *QPred = QStore->getParent() == QTB ? QCond : QB.CreateNot(QCond); + + if (InvertPCond) + PPred = QB.CreateNot(PPred); + if (InvertQCond) + QPred = QB.CreateNot(QPred); + Value *CombinedPred = QB.CreateOr(PPred, QPred); + + auto *T = + SplitBlockAndInsertIfThen(CombinedPred, &*QB.GetInsertPoint(), false); + QB.SetInsertPoint(T); + StoreInst *SI = cast(QB.CreateStore(QPHI, Address)); + AAMDNodes AAMD; + PStore->getAAMetadata(AAMD, /*Merge=*/false); + PStore->getAAMetadata(AAMD, /*Merge=*/true); + SI->setAAMetadata(AAMD); + + QStore->eraseFromParent(); + PStore->eraseFromParent(); + + return true; +} + +static bool mergeConditionalStores(BranchInst *PBI, BranchInst *QBI) { + // The intention here is to find diamonds or triangles (see below) where each + // conditional block contains a store to the same address. Both of these + // stores are conditional, so they can't be unconditionally sunk. But it may + // be profitable to speculatively sink the stores into one merged store at the + // end, and predicate the merged store on the union of the two conditions of + // PBI and QBI. + // + // This can reduce the number of stores executed if both of the conditions are + // true, and can allow the blocks to become small enough to be if-converted. + // This optimization will also chain, so that ladders of test-and-set + // sequences can be if-converted away. + // + // We only deal with simple diamonds or triangles: + // + // PBI or PBI or a combination of the two + // / \ | \ + // PTB PFB | PFB + // \ / | / + // QBI QBI + // / \ | \ + // QTB QFB | QFB + // \ / | / + // PostBB PostBB + // + // We model triangles as a type of diamond with a nullptr "true" block. + // Triangles are canonicalized so that the fallthrough edge is represented by + // a true condition, as in the diagram above. + // + BasicBlock *PTB = PBI->getSuccessor(0); + BasicBlock *PFB = PBI->getSuccessor(1); + BasicBlock *QTB = QBI->getSuccessor(0); + BasicBlock *QFB = QBI->getSuccessor(1); + BasicBlock *PostBB = QFB->getSingleSuccessor(); + + bool InvertPCond = false, InvertQCond = false; + // Canonicalize fallthroughs to the true branches. + if (PFB == QBI->getParent()) { + std::swap(PFB, PTB); + InvertPCond = true; + } + if (QFB == PostBB) { + std::swap(QFB, QTB); + InvertQCond = true; + } + + // From this point on we can assume PTB or QTB may be fallthroughs but PFB + // and QFB may not. Model fallthroughs as a nullptr block. 
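// A source-level sketch of what mergeConditionalStores is after, written in
// plain C++ rather than IR; p, q, a, x and y are illustrative names only. Two
// individually conditional stores to the same address become a single store
// predicated on the union of the two conditions, with the stored value carried
// by what becomes a PHI node in the IR.
static void beforeMerge(bool p, bool q, int *a, int x, int y) {
  if (p) *a = x;    // first diamond/triangle
  if (q) *a = y;    // second diamond/triangle
}

static void afterMerge(bool p, bool q, int *a, int x, int y) {
  int tmp = 0;           // stands in for the PHI built by
  if (p) tmp = x;        // ensureValueAvailableInSuccessor
  if (q) tmp = y;
  if (p | q) *a = tmp;   // the single predicated store in PostBB
}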
+ if (PTB == QBI->getParent()) + PTB = nullptr; + if (QTB == PostBB) + QTB = nullptr; + + // Legality bailouts. We must have at least the non-fallthrough blocks and + // the post-dominating block, and the non-fallthroughs must only have one + // predecessor. + auto HasOnePredAndOneSucc = [](BasicBlock *BB, BasicBlock *P, BasicBlock *S) { + return BB->getSinglePredecessor() == P && + BB->getSingleSuccessor() == S; + }; + if (!PostBB || + !HasOnePredAndOneSucc(PFB, PBI->getParent(), QBI->getParent()) || + !HasOnePredAndOneSucc(QFB, QBI->getParent(), PostBB)) + return false; + if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) || + (QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB))) + return false; + if (PostBB->getNumUses() != 2 || QBI->getParent()->getNumUses() != 2) + return false; + + // OK, this is a sequence of two diamonds or triangles. + // Check if there are stores in PTB or PFB that are repeated in QTB or QFB. + SmallPtrSet PStoreAddresses, QStoreAddresses; + for (auto *BB : {PTB, PFB}) { + if (!BB) + continue; + for (auto &I : *BB) + if (StoreInst *SI = dyn_cast(&I)) + PStoreAddresses.insert(SI->getPointerOperand()); + } + for (auto *BB : {QTB, QFB}) { + if (!BB) + continue; + for (auto &I : *BB) + if (StoreInst *SI = dyn_cast(&I)) + QStoreAddresses.insert(SI->getPointerOperand()); + } + + set_intersect(PStoreAddresses, QStoreAddresses); + // set_intersect mutates PStoreAddresses in place. Rename it here to make it + // clear what it contains. + auto &CommonAddresses = PStoreAddresses; + + bool Changed = false; + for (auto *Address : CommonAddresses) + Changed |= mergeConditionalStoreToAddress( + PTB, PFB, QTB, QFB, PostBB, Address, InvertPCond, InvertQCond); + return Changed; +} + +/// If we have a conditional branch as a predecessor of another block, +/// this function tries to simplify it. We know /// that PBI and BI are both conditional branches, and BI is in one of the /// successor blocks of PBI - PBI branches to BI. -static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { +static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, + const DataLayout &DL) { assert(PBI->isConditional() && BI->isConditional()); BasicBlock *BB = BI->getParent(); @@ -2285,10 +2682,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { // simplifycfg will thread the block. if (BlockIsSimpleEnoughToThreadThrough(BB)) { pred_iterator PB = pred_begin(BB), PE = pred_end(BB); - PHINode *NewPN = PHINode::Create(Type::getInt1Ty(BB->getContext()), - std::distance(PB, PE), - BI->getCondition()->getName() + ".pr", - BB->begin()); + PHINode *NewPN = PHINode::Create( + Type::getInt1Ty(BB->getContext()), std::distance(PB, PE), + BI->getCondition()->getName() + ".pr", &BB->front()); // Okay, we're going to insert the PHI node. Since PBI is not the only // predecessor, compute the PHI'd conditional value for all of the preds. // Any predecessor where the condition is not computable we keep symbolic. @@ -2311,6 +2707,29 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { } } + if (auto *CE = dyn_cast(BI->getCondition())) + if (CE->canTrap()) + return false; + + // If BI is reached from the true path of PBI and PBI's condition implies + // BI's condition, we know the direction of the BI branch. 
+ if (PBI->getSuccessor(0) == BI->getParent() && + isImpliedCondition(PBI->getCondition(), BI->getCondition(), DL) && + PBI->getSuccessor(0) != PBI->getSuccessor(1) && + BB->getSinglePredecessor()) { + // Turn this into a branch on constant. + auto *OldCond = BI->getCondition(); + BI->setCondition(ConstantInt::getTrue(BB->getContext())); + RecursivelyDeleteTriviallyDeadInstructions(OldCond); + return true; // Nuke the branch on constant. + } + + // If both branches are conditional and both contain stores to the same + // address, remove the stores from the conditionals and create a conditional + // merged store at the end. + if (MergeCondStores && mergeConditionalStores(PBI, BI)) + return true; + // If this is a conditional branch in an empty block, and if any // predecessors are a conditional branch to one of our destinations, // fold the conditions into logical ops and one cond br. @@ -2321,11 +2740,6 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { if (&*BBI != BI) return false; - - if (ConstantExpr *CE = dyn_cast(BI->getCondition())) - if (CE->canTrap()) - return false; - int PBIOp, BIOp; if (PBI->getSuccessor(0) == BI->getSuccessor(0)) PBIOp = BIOp = 0; @@ -2432,17 +2846,15 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { // The weight to CommonDest should be PredCommon * SuccTotal + // PredOther * SuccCommon. // The weight to OtherDest should be PredOther * SuccOther. - SmallVector NewWeights; - NewWeights.push_back(PredCommon * (SuccCommon + SuccOther) + - PredOther * SuccCommon); - NewWeights.push_back(PredOther * SuccOther); + uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) + + PredOther * SuccCommon, + PredOther * SuccOther}; // Halve the weights if any of them cannot fit in an uint32_t FitWeights(NewWeights); - SmallVector MDWeights(NewWeights.begin(),NewWeights.end()); PBI->setMetadata(LLVMContext::MD_prof, - MDBuilder(BI->getContext()). - createBranchWeights(MDWeights)); + MDBuilder(BI->getContext()) + .createBranchWeights(NewWeights[0], NewWeights[1])); } // OtherDest may have phi nodes. If so, add an entry from PBI's @@ -2475,8 +2887,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { return true; } -// SimplifyTerminatorOnSelect - Simplifies a terminator by replacing it with a -// branch to TrueBB if Cond is true or to FalseBB if Cond is false. +// Simplifies a terminator by replacing it with a branch to TrueBB if Cond is +// true or to FalseBB if Cond is false. // Takes care of updating the successors and removing the old terminator. // Also makes sure not to introduce new successors by assuming that edges to // non-successor TrueBBs and FalseBBs aren't reachable. @@ -2492,15 +2904,15 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond, BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr; // Then remove the rest. - for (unsigned I = 0, E = OldTerm->getNumSuccessors(); I != E; ++I) { - BasicBlock *Succ = OldTerm->getSuccessor(I); + for (BasicBlock *Succ : OldTerm->successors()) { // Make sure only to keep exactly one copy of each edge. 
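// A minimal sketch of the branch-weight arithmetic above, assuming raw
// uint64_t weights rather than MD_prof metadata. The merged edge weights come
// from combining predecessor and successor weights, and both are scaled down
// together until they fit in uint32_t so their ratio is preserved (the job
// FitWeights does above).
#include <cstdint>

struct MergedWeights { uint64_t ToCommonDest, ToOtherDest; };

static MergedWeights combineBranchWeights(uint64_t PredCommon, uint64_t PredOther,
                                          uint64_t SuccCommon, uint64_t SuccOther) {
  MergedWeights W;
  W.ToCommonDest = PredCommon * (SuccCommon + SuccOther) + PredOther * SuccCommon;
  W.ToOtherDest  = PredOther * SuccOther;
  while (W.ToCommonDest > UINT32_MAX || W.ToOtherDest > UINT32_MAX) {
    W.ToCommonDest /= 2;
    W.ToOtherDest  /= 2;
  }
  return W;
}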
if (Succ == KeepEdge1) KeepEdge1 = nullptr; else if (Succ == KeepEdge2) KeepEdge2 = nullptr; else - Succ->removePredecessor(OldTerm->getParent()); + Succ->removePredecessor(OldTerm->getParent(), + /*DontDeleteUselessPHIs=*/true); } IRBuilder<> Builder(OldTerm); @@ -2541,7 +2953,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond, return true; } -// SimplifySwitchOnSelect - Replaces +// Replaces // (switch (select cond, X, Y)) on constant X, Y // with a branch - conditional if X and Y lead to distinct BBs, // unconditional otherwise. @@ -2576,7 +2988,7 @@ static bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) { TrueWeight, FalseWeight); } -// SimplifyIndirectBrOnSelect - Replaces +// Replaces // (indirectbr (select cond, blockaddress(@fn, BlockA), // blockaddress(@fn, BlockB))) // with @@ -2597,8 +3009,8 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { 0, 0); } -/// TryToSimplifyUncondBranchWithICmpInIt - This is called when we find an icmp -/// instruction (a seteq/setne with a constant) as the only instruction in a +/// This is called when we find an icmp instruction +/// (a seteq/setne with a constant) as the only instruction in a /// block that ends with an uncond branch. We are looking for a very specific /// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In /// this case, we merge the first two "or's of icmp" into a switch, but then the @@ -2615,8 +3027,9 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { /// We prefer to split the edge to 'end' so that there is a true/false entry to /// the PHI, merging the third icmp into the switch. static bool TryToSimplifyUncondBranchWithICmpInIt( - ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI, - const DataLayout *DL, AssumptionTracker *AT) { + ICmpInst *ICI, IRBuilder<> &Builder, const DataLayout &DL, + const TargetTransformInfo &TTI, unsigned BonusInstThreshold, + AssumptionCache *AC) { BasicBlock *BB = ICI->getParent(); // If the block has any PHIs in it or the icmp has multiple uses, it is too @@ -2649,7 +3062,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( ICI->eraseFromParent(); } // BB is now empty, so it is likely to simplify away. - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } // Ok, the block is reachable from the default dest. If the constant we're @@ -2665,7 +3078,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); // BB is now empty, so it is likely to simplify away. - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } // The use of the icmp has to be in the 'end' block, by the only PHI node in @@ -2718,32 +3131,25 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( return true; } -/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch. +/// The specified branch is a conditional branch. /// Check to see if it is branching on an or/and chain of icmp instructions, and /// fold it into a switch instruction if so. -static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, - IRBuilder<> &Builder) { +static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, + const DataLayout &DL) { Instruction *Cond = dyn_cast(BI->getCondition()); if (!Cond) return false; - // Change br (X == 0 | X == 1), T, F into a switch instruction. 
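// A source-level sketch (plain C++, illustrative constants) of the pattern
// TryToSimplifyUncondBranchWithICmpInIt completes: after the first two
// equality tests of "A == 1 || A == 2 || A == 3" have been merged into a
// switch, the leftover compare in the small trailing block is folded in as one
// more case, with the PHI in the join block carrying the true/false result.
static bool beforeChain(int a) {
  return a == 1 || a == 2 || a == 3;
}

static bool afterChain(int a) {
  switch (a) {
  case 1:
  case 2:
  case 3:
    return true;
  default:
    return false;
  }
}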
// If this is a bunch of seteq's or'd together, or if it's a bunch of // 'setne's and'ed together, collect them. - Value *CompVal = nullptr; - std::vector Values; - bool TrueWhenEqual = true; - Value *ExtraCase = nullptr; - unsigned UsedICmps = 0; - if (Cond->getOpcode() == Instruction::Or) { - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true, - UsedICmps); - } else if (Cond->getOpcode() == Instruction::And) { - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false, - UsedICmps); - TrueWhenEqual = false; - } + // Try to gather values from a chain of and/or to be turned into a switch + ConstantComparesGatherer ConstantCompare(Cond, DL); + // Unpack the result + SmallVectorImpl &Values = ConstantCompare.Vals; + Value *CompVal = ConstantCompare.CompValue; + unsigned UsedICmps = ConstantCompare.UsedICmps; + Value *ExtraCase = ConstantCompare.Extra; // If we didn't have a multiply compared value, fail. if (!CompVal) return false; @@ -2752,13 +3158,15 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, if (UsedICmps <= 1) return false; + bool TrueWhenEqual = (Cond->getOpcode() == Instruction::Or); + // There might be duplicate constants in the list, which the switch // instruction can't handle, remove them now. array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate); Values.erase(std::unique(Values.begin(), Values.end()), Values.end()); // If Extra was used, we require at least two switch values to do the - // transformation. A switch with one value is just an cond branch. + // transformation. A switch with one value is just a conditional branch. if (ExtraCase && Values.size() < 2) return false; // TODO: Preserve branch weight metadata, similarly to how @@ -2778,7 +3186,8 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, // then we evaluate them with an explicit branch first. Split the block // right before the condbr to handle it. if (ExtraCase) { - BasicBlock *NewBB = BB->splitBasicBlock(BI, "switch.early.test"); + BasicBlock *NewBB = + BB->splitBasicBlock(BI->getIterator(), "switch.early.test"); // Remove the uncond branch added to the old block. TerminatorInst *OldTI = BB->getTerminator(); Builder.SetInsertPoint(OldTI); @@ -2802,10 +3211,8 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, Builder.SetInsertPoint(BI); // Convert pointer to int before we switch. if (CompVal->getType()->isPointerTy()) { - assert(DL && "Cannot switch on pointer without DataLayout"); - CompVal = Builder.CreatePtrToInt(CompVal, - DL->getIntPtrType(CompVal->getType()), - "magicptr"); + CompVal = Builder.CreatePtrToInt( + CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr"); } // Create the new switch instruction now. @@ -2834,63 +3241,235 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, } bool SimplifyCFGOpt::SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder) { - // If this is a trivial landing pad that just continues unwinding the caught - // exception then zap the landing pad, turning its invokes into calls. + if (isa(RI->getValue())) + return SimplifyCommonResume(RI); + else if (isa(RI->getParent()->getFirstNonPHI()) && + RI->getValue() == RI->getParent()->getFirstNonPHI()) + // The resume must unwind the exception that caused control to branch here. + return SimplifySingleResume(RI); + else + return false; +} + +// Simplify resume that is shared by several landing pads (phi of landing pad). 
+bool SimplifyCFGOpt::SimplifyCommonResume(ResumeInst *RI) { + BasicBlock *BB = RI->getParent(); + + // Check that there are no other instructions except for debug intrinsics + // between the phi of landing pads (RI->getValue()) and resume instruction. + BasicBlock::iterator I = cast(RI->getValue())->getIterator(), + E = RI->getIterator(); + while (++I != E) + if (!isa(I)) + return false; + + SmallSet TrivialUnwindBlocks; + auto *PhiLPInst = cast(RI->getValue()); + + // Check incoming blocks to see if any of them are trivial. + for (unsigned Idx = 0, End = PhiLPInst->getNumIncomingValues(); + Idx != End; Idx++) { + auto *IncomingBB = PhiLPInst->getIncomingBlock(Idx); + auto *IncomingValue = PhiLPInst->getIncomingValue(Idx); + + // If the block has other successors, we can not delete it because + // it has other dependents. + if (IncomingBB->getUniqueSuccessor() != BB) + continue; + + auto *LandingPad = + dyn_cast(IncomingBB->getFirstNonPHI()); + // Not the landing pad that caused the control to branch here. + if (IncomingValue != LandingPad) + continue; + + bool isTrivial = true; + + I = IncomingBB->getFirstNonPHI()->getIterator(); + E = IncomingBB->getTerminator()->getIterator(); + while (++I != E) + if (!isa(I)) { + isTrivial = false; + break; + } + + if (isTrivial) + TrivialUnwindBlocks.insert(IncomingBB); + } + + // If no trivial unwind blocks, don't do any simplifications. + if (TrivialUnwindBlocks.empty()) return false; + + // Turn all invokes that unwind here into calls. + for (auto *TrivialBB : TrivialUnwindBlocks) { + // Blocks that will be simplified should be removed from the phi node. + // Note there could be multiple edges to the resume block, and we need + // to remove them all. + while (PhiLPInst->getBasicBlockIndex(TrivialBB) != -1) + BB->removePredecessor(TrivialBB, true); + + for (pred_iterator PI = pred_begin(TrivialBB), PE = pred_end(TrivialBB); + PI != PE;) { + BasicBlock *Pred = *PI++; + removeUnwindEdge(Pred); + } + + // In each SimplifyCFG run, only the current processed block can be erased. + // Otherwise, it will break the iteration of SimplifyCFG pass. So instead + // of erasing TrivialBB, we only remove the branch to the common resume + // block so that we can later erase the resume block since it has no + // predecessors. + TrivialBB->getTerminator()->eraseFromParent(); + new UnreachableInst(RI->getContext(), TrivialBB); + } + + // Delete the resume block if all its predecessors have been removed. + if (pred_empty(BB)) + BB->eraseFromParent(); + + return !TrivialUnwindBlocks.empty(); +} + +// Simplify resume that is only used by a single (non-phi) landing pad. +bool SimplifyCFGOpt::SimplifySingleResume(ResumeInst *RI) { BasicBlock *BB = RI->getParent(); LandingPadInst *LPInst = dyn_cast(BB->getFirstNonPHI()); - if (RI->getValue() != LPInst) - // Not a landing pad, or the resume is not unwinding the exception that - // caused control to branch here. - return false; + assert (RI->getValue() == LPInst && + "Resume must unwind the exception that caused control to here"); // Check that there are no other instructions except for debug intrinsics. - BasicBlock::iterator I = LPInst, E = RI; + BasicBlock::iterator I = LPInst->getIterator(), E = RI->getIterator(); while (++I != E) if (!isa(I)) return false; // Turn all invokes that unwind here into calls and delete the basic block. 
- bool InvokeRequiresTableEntry = false; - bool Changed = false; for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE;) { - InvokeInst *II = cast((*PI++)->getTerminator()); + BasicBlock *Pred = *PI++; + removeUnwindEdge(Pred); + } - if (II->hasFnAttr(Attribute::UWTable)) { - // Don't remove an `invoke' instruction if the ABI requires an entry into - // the table. - InvokeRequiresTableEntry = true; - continue; - } + // The landingpad is now unreachable. Zap it. + BB->eraseFromParent(); + return true; +} - SmallVector Args(II->op_begin(), II->op_end() - 3); +bool SimplifyCFGOpt::SimplifyCleanupReturn(CleanupReturnInst *RI) { + // If this is a trivial cleanup pad that executes no instructions, it can be + // eliminated. If the cleanup pad continues to the caller, any predecessor + // that is an EH pad will be updated to continue to the caller and any + // predecessor that terminates with an invoke instruction will have its invoke + // instruction converted to a call instruction. If the cleanup pad being + // simplified does not continue to the caller, each predecessor will be + // updated to continue to the unwind destination of the cleanup pad being + // simplified. + BasicBlock *BB = RI->getParent(); + CleanupPadInst *CPInst = RI->getCleanupPad(); + if (CPInst->getParent() != BB) + // This isn't an empty cleanup. + return false; - // Insert a call instruction before the invoke. - CallInst *Call = CallInst::Create(II->getCalledValue(), Args, "", II); - Call->takeName(II); - Call->setCallingConv(II->getCallingConv()); - Call->setAttributes(II->getAttributes()); - Call->setDebugLoc(II->getDebugLoc()); + // Check that there are no other instructions except for debug intrinsics. + BasicBlock::iterator I = CPInst->getIterator(), E = RI->getIterator(); + while (++I != E) + if (!isa(I)) + return false; - // Anything that used the value produced by the invoke instruction now uses - // the value produced by the call instruction. Note that we do this even - // for void functions and calls with no uses so that the callgraph edge is - // updated. - II->replaceAllUsesWith(Call); - BB->removePredecessor(II->getParent()); + // If the cleanup return we are simplifying unwinds to the caller, this will + // set UnwindDest to nullptr. + BasicBlock *UnwindDest = RI->getUnwindDest(); + Instruction *DestEHPad = UnwindDest ? UnwindDest->getFirstNonPHI() : nullptr; + + // We're about to remove BB from the control flow. Before we do, sink any + // PHINodes into the unwind destination. Doing this before changing the + // control flow avoids some potentially slow checks, since we can currently + // be certain that UnwindDest and BB have no common predecessors (since they + // are both EH pads). + if (UnwindDest) { + // First, go through the PHI nodes in UnwindDest and update any nodes that + // reference the block we are removing + for (BasicBlock::iterator I = UnwindDest->begin(), + IE = DestEHPad->getIterator(); + I != IE; ++I) { + PHINode *DestPN = cast(I); + + int Idx = DestPN->getBasicBlockIndex(BB); + // Since BB unwinds to UnwindDest, it has to be in the PHI node. + assert(Idx != -1); + // This PHI node has an incoming value that corresponds to a control + // path through the cleanup pad we are removing. If the incoming + // value is in the cleanup pad, it must be a PHINode (because we + // verified above that the block is otherwise empty). Otherwise, the + // value is either a constant or a value that dominates the cleanup + // pad being removed. 
+ // + // Because BB and UnwindDest are both EH pads, all of their + // predecessors must unwind to these blocks, and since no instruction + // can have multiple unwind destinations, there will be no overlap in + // incoming blocks between SrcPN and DestPN. + Value *SrcVal = DestPN->getIncomingValue(Idx); + PHINode *SrcPN = dyn_cast(SrcVal); + + // Remove the entry for the block we are deleting. + DestPN->removeIncomingValue(Idx, false); + + if (SrcPN && SrcPN->getParent() == BB) { + // If the incoming value was a PHI node in the cleanup pad we are + // removing, we need to merge that PHI node's incoming values into + // DestPN. + for (unsigned SrcIdx = 0, SrcE = SrcPN->getNumIncomingValues(); + SrcIdx != SrcE; ++SrcIdx) { + DestPN->addIncoming(SrcPN->getIncomingValue(SrcIdx), + SrcPN->getIncomingBlock(SrcIdx)); + } + } else { + // Otherwise, the incoming value came from above BB and + // so we can just reuse it. We must associate all of BB's + // predecessors with this value. + for (auto *pred : predecessors(BB)) { + DestPN->addIncoming(SrcVal, pred); + } + } + } - // Insert a branch to the normal destination right before the invoke. - BranchInst::Create(II->getNormalDest(), II); + // Sink any remaining PHI nodes directly into UnwindDest. + Instruction *InsertPt = DestEHPad; + for (BasicBlock::iterator I = BB->begin(), + IE = BB->getFirstNonPHI()->getIterator(); + I != IE;) { + // The iterator must be incremented here because the instructions are + // being moved to another block. + PHINode *PN = cast(I++); + if (PN->use_empty()) + // If the PHI node has no uses, just leave it. It will be erased + // when we erase BB below. + continue; - // Finally, delete the invoke instruction! - II->eraseFromParent(); - Changed = true; + // Otherwise, sink this PHI node into UnwindDest. + // Any predecessors to UnwindDest which are not already represented + // must be back edges which inherit the value from the path through + // BB. In this case, the PHI value must reference itself. + for (auto *pred : predecessors(UnwindDest)) + if (pred != BB) + PN->addIncoming(PN, pred); + PN->moveBefore(InsertPt); + } } - if (!InvokeRequiresTableEntry) - // The landingpad is now unreachable. Zap it. - BB->eraseFromParent(); + for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE;) { + // The iterator must be updated here because we are removing this pred. + BasicBlock *PredBB = *PI++; + if (UnwindDest == nullptr) { + removeUnwindEdge(PredBB); + } else { + TerminatorInst *TI = PredBB->getTerminator(); + TI->replaceUsesOfWith(BB, UnwindDest); + } + } - return Changed; + // The cleanup pad is now unreachable. Zap it. + BB->eraseFromParent(); + return true; } bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { @@ -2921,7 +3500,7 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { } // If we eliminated all predecessors of the block, delete the block now. - if (pred_begin(BB) == pred_end(BB)) + if (pred_empty(BB)) // We know there are no successors, so just nuke the block. BB->eraseFromParent(); @@ -2950,8 +3529,8 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { // If there are any instructions immediately before the unreachable that can // be removed, do so. 
- while (UI != BB->begin()) { - BasicBlock::iterator BBI = UI; + while (UI->getIterator() != BB->begin()) { + BasicBlock::iterator BBI = UI->getIterator(); --BBI; // Do not delete instructions that can have side effects which might cause // the unreachable to not be reachable; specifically, calls and volatile @@ -2959,18 +3538,26 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { if (isa(BBI) && !isa(BBI)) break; if (BBI->mayHaveSideEffects()) { - if (StoreInst *SI = dyn_cast(BBI)) { + if (auto *SI = dyn_cast(BBI)) { if (SI->isVolatile()) break; - } else if (LoadInst *LI = dyn_cast(BBI)) { + } else if (auto *LI = dyn_cast(BBI)) { if (LI->isVolatile()) break; - } else if (AtomicRMWInst *RMWI = dyn_cast(BBI)) { + } else if (auto *RMWI = dyn_cast(BBI)) { if (RMWI->isVolatile()) break; - } else if (AtomicCmpXchgInst *CXI = dyn_cast(BBI)) { + } else if (auto *CXI = dyn_cast(BBI)) { if (CXI->isVolatile()) break; + } else if (isa(BBI)) { + // A catchpad may invoke exception object constructors and such, which + // in some languages can be arbitrary code, so be conservative by + // default. + // For CoreCLR, it just involves a type test, so can be removed. + if (classifyEHPersonality(BB->getParent()->getPersonalityFn()) != + EHPersonality::CoreCLR) + break; } else if (!isa(BBI) && !isa(BBI) && !isa(BBI)) { break; @@ -2996,7 +3583,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { for (unsigned i = 0, e = Preds.size(); i != e; ++i) { TerminatorInst *TI = Preds[i]->getTerminator(); IRBuilder<> Builder(TI); - if (BranchInst *BI = dyn_cast(TI)) { + if (auto *BI = dyn_cast(TI)) { if (BI->isUnconditional()) { if (BI->getSuccessor(0) == BB) { new UnreachableInst(TI->getContext(), TI); @@ -3013,7 +3600,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { Changed = true; } } - } else if (SwitchInst *SI = dyn_cast(TI)) { + } else if (auto *SI = dyn_cast(TI)) { for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) if (i.getCaseSuccessor() == BB) { @@ -3022,79 +3609,53 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { --i; --e; Changed = true; } - // If the default value is unreachable, figure out the most popular - // destination and make it the default. - if (SI->getDefaultDest() == BB) { - std::map > Popularity; - for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); - i != e; ++i) { - std::pair &entry = - Popularity[i.getCaseSuccessor()]; - if (entry.first == 0) { - entry.first = 1; - entry.second = i.getCaseIndex(); - } else { - entry.first++; - } - } + } else if (auto *II = dyn_cast(TI)) { + if (II->getUnwindDest() == BB) { + removeUnwindEdge(TI->getParent()); + Changed = true; + } + } else if (auto *CSI = dyn_cast(TI)) { + if (CSI->getUnwindDest() == BB) { + removeUnwindEdge(TI->getParent()); + Changed = true; + continue; + } - // Find the most popular block. - unsigned MaxPop = 0; - unsigned MaxIndex = 0; - BasicBlock *MaxBlock = nullptr; - for (std::map >::iterator - I = Popularity.begin(), E = Popularity.end(); I != E; ++I) { - if (I->second.first > MaxPop || - (I->second.first == MaxPop && MaxIndex > I->second.second)) { - MaxPop = I->second.first; - MaxIndex = I->second.second; - MaxBlock = I->first; - } - } - if (MaxBlock) { - // Make this the new default, allowing us to delete any explicit - // edges to it. 
- SI->setDefaultDest(MaxBlock); + for (CatchSwitchInst::handler_iterator I = CSI->handler_begin(), + E = CSI->handler_end(); + I != E; ++I) { + if (*I == BB) { + CSI->removeHandler(I); + --I; + --E; Changed = true; - - // If MaxBlock has phinodes in it, remove MaxPop-1 entries from - // it. - if (isa(MaxBlock->begin())) - for (unsigned i = 0; i != MaxPop-1; ++i) - MaxBlock->removePredecessor(SI->getParent()); - - for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); - i != e; ++i) - if (i.getCaseSuccessor() == MaxBlock) { - SI->removeCase(i); - --i; --e; - } } } - } else if (InvokeInst *II = dyn_cast(TI)) { - if (II->getUnwindDest() == BB) { - // Convert the invoke to a call instruction. This would be a good - // place to note that the call does not throw though. - BranchInst *BI = Builder.CreateBr(II->getNormalDest()); - II->removeFromParent(); // Take out of symbol table - - // Insert the call now... - SmallVector Args(II->op_begin(), II->op_end()-3); - Builder.SetInsertPoint(BI); - CallInst *CI = Builder.CreateCall(II->getCalledValue(), - Args, II->getName()); - CI->setCallingConv(II->getCallingConv()); - CI->setAttributes(II->getAttributes()); - // If the invoke produced a value, the call does now instead. - II->replaceAllUsesWith(CI); - delete II; + if (CSI->getNumHandlers() == 0) { + BasicBlock *CatchSwitchBB = CSI->getParent(); + if (CSI->hasUnwindDest()) { + // Redirect preds to the unwind dest + CatchSwitchBB->replaceAllUsesWith(CSI->getUnwindDest()); + } else { + // Rewrite all preds to unwind to caller (or from invoke to call). + SmallVector EHPreds(predecessors(CatchSwitchBB)); + for (BasicBlock *EHPred : EHPreds) + removeUnwindEdge(EHPred); + } + // The catchswitch is no longer reachable. + new UnreachableInst(CSI->getContext(), CSI); + CSI->eraseFromParent(); Changed = true; } + } else if (isa(TI)) { + new UnreachableInst(TI->getContext(), TI); + TI->eraseFromParent(); + Changed = true; } } // If this block is now dead, remove it. - if (pred_begin(BB) == pred_end(BB) && + if (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) { // We know there are no successors, so just nuke the block. BB->eraseFromParent(); @@ -3104,83 +3665,135 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { return Changed; } -/// TurnSwitchRangeIntoICmp - Turns a switch with that contains only a -/// integer range comparison into a sub, an icmp and a branch. -static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) { - assert(SI->getNumCases() > 1 && "Degenerate switch?"); +static bool CasesAreContiguous(SmallVectorImpl &Cases) { + assert(Cases.size() >= 1); - // Make sure all cases point to the same destination and gather the values. - SmallVector Cases; - SwitchInst::CaseIt I = SI->case_begin(); - Cases.push_back(I.getCaseValue()); - SwitchInst::CaseIt PrevI = I++; - for (SwitchInst::CaseIt E = SI->case_end(); I != E; PrevI = I++) { - if (PrevI.getCaseSuccessor() != I.getCaseSuccessor()) + array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate); + for (size_t I = 1, E = Cases.size(); I != E; ++I) { + if (Cases[I - 1]->getValue() != Cases[I]->getValue() + 1) return false; - Cases.push_back(I.getCaseValue()); } - assert(Cases.size() == SI->getNumCases() && "Not all cases gathered"); + return true; +} - // Sort the case values, then check if they form a range we can transform. 
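// A minimal sketch of the contiguity test above, assuming plain 64-bit case
// values instead of ConstantInt and an ascending sort (the real code sorts
// with ConstantIntSortPredicate). The cases form a single range exactly when
// every neighbouring pair of sorted values differs by one.
#include <algorithm>
#include <cstdint>
#include <vector>

static bool casesAreContiguous(std::vector<uint64_t> Cases) {
  if (Cases.empty())
    return false;
  std::sort(Cases.begin(), Cases.end());
  for (size_t I = 1, E = Cases.size(); I != E; ++I)
    if (Cases[I] != Cases[I - 1] + 1)
      return false;
  return true;
}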
- array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate); - for (unsigned I = 1, E = Cases.size(); I != E; ++I) { - if (Cases[I-1]->getValue() != Cases[I]->getValue()+1) - return false; +/// Turn a switch with two reachable destinations into an integer range +/// comparison and branch. +static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) { + assert(SI->getNumCases() > 1 && "Degenerate switch?"); + + bool HasDefault = + !isa(SI->getDefaultDest()->getFirstNonPHIOrDbg()); + + // Partition the cases into two sets with different destinations. + BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr; + BasicBlock *DestB = nullptr; + SmallVector CasesA; + SmallVector CasesB; + + for (SwitchInst::CaseIt I : SI->cases()) { + BasicBlock *Dest = I.getCaseSuccessor(); + if (!DestA) DestA = Dest; + if (Dest == DestA) { + CasesA.push_back(I.getCaseValue()); + continue; + } + if (!DestB) DestB = Dest; + if (Dest == DestB) { + CasesB.push_back(I.getCaseValue()); + continue; + } + return false; // More than two destinations. } - Constant *Offset = ConstantExpr::getNeg(Cases.back()); - Constant *NumCases = ConstantInt::get(Offset->getType(), SI->getNumCases()); + assert(DestA && DestB && "Single-destination switch should have been folded."); + assert(DestA != DestB); + assert(DestB != SI->getDefaultDest()); + assert(!CasesB.empty() && "There must be non-default cases."); + assert(!CasesA.empty() || HasDefault); + + // Figure out if one of the sets of cases form a contiguous range. + SmallVectorImpl *ContiguousCases = nullptr; + BasicBlock *ContiguousDest = nullptr; + BasicBlock *OtherDest = nullptr; + if (!CasesA.empty() && CasesAreContiguous(CasesA)) { + ContiguousCases = &CasesA; + ContiguousDest = DestA; + OtherDest = DestB; + } else if (CasesAreContiguous(CasesB)) { + ContiguousCases = &CasesB; + ContiguousDest = DestB; + OtherDest = DestA; + } else + return false; + + // Start building the compare and branch. + + Constant *Offset = ConstantExpr::getNeg(ContiguousCases->back()); + Constant *NumCases = ConstantInt::get(Offset->getType(), ContiguousCases->size()); Value *Sub = SI->getCondition(); if (!Offset->isNullValue()) - Sub = Builder.CreateAdd(Sub, Offset, Sub->getName()+".off"); + Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off"); + Value *Cmp; // If NumCases overflowed, then all possible values jump to the successor. - if (NumCases->isNullValue() && SI->getNumCases() != 0) + if (NumCases->isNullValue() && !ContiguousCases->empty()) Cmp = ConstantInt::getTrue(SI->getContext()); else Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); - BranchInst *NewBI = Builder.CreateCondBr( - Cmp, SI->case_begin().getCaseSuccessor(), SI->getDefaultDest()); + BranchInst *NewBI = Builder.CreateCondBr(Cmp, ContiguousDest, OtherDest); // Update weight for the newly-created conditional branch. - SmallVector Weights; - bool HasWeights = HasBranchWeights(SI); - if (HasWeights) { + if (HasBranchWeights(SI)) { + SmallVector Weights; GetBranchWeights(SI, Weights); if (Weights.size() == 1 + SI->getNumCases()) { - // Combine all weights for the cases to be the true weight of NewBI. - // We assume that the sum of all weights for a Terminator can fit into 32 - // bits. 
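// A source-level sketch of the range transformation, with illustrative case
// values 5..7: when the cases reaching one destination form a contiguous
// range, the switch becomes a subtraction of the range start followed by a
// single unsigned compare against the range length.
static int beforeRange(unsigned x) {
  switch (x) {
  case 5: case 6: case 7:
    return 1;
  default:
    return 0;
  }
}

static int afterRange(unsigned x) {
  return (x - 5u) < 3u ? 1 : 0;   // the ".off" add and the ULT "switch" compare
}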
- uint32_t NewTrueWeight = 0; - for (unsigned I = 1, E = Weights.size(); I != E; ++I) - NewTrueWeight += (uint32_t)Weights[I]; + uint64_t TrueWeight = 0; + uint64_t FalseWeight = 0; + for (size_t I = 0, E = Weights.size(); I != E; ++I) { + if (SI->getSuccessor(I) == ContiguousDest) + TrueWeight += Weights[I]; + else + FalseWeight += Weights[I]; + } + while (TrueWeight > UINT32_MAX || FalseWeight > UINT32_MAX) { + TrueWeight /= 2; + FalseWeight /= 2; + } NewBI->setMetadata(LLVMContext::MD_prof, - MDBuilder(SI->getContext()). - createBranchWeights(NewTrueWeight, - (uint32_t)Weights[0])); + MDBuilder(SI->getContext()).createBranchWeights( + (uint32_t)TrueWeight, (uint32_t)FalseWeight)); } } - // Prune obsolete incoming values off the successor's PHI nodes. - for (BasicBlock::iterator BBI = SI->case_begin().getCaseSuccessor()->begin(); - isa(BBI); ++BBI) { - for (unsigned I = 0, E = SI->getNumCases()-1; I != E; ++I) + // Prune obsolete incoming values off the successors' PHI nodes. + for (auto BBI = ContiguousDest->begin(); isa(BBI); ++BBI) { + unsigned PreviousEdges = ContiguousCases->size(); + if (ContiguousDest == SI->getDefaultDest()) ++PreviousEdges; + for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) + cast(BBI)->removeIncomingValue(SI->getParent()); + } + for (auto BBI = OtherDest->begin(); isa(BBI); ++BBI) { + unsigned PreviousEdges = SI->getNumCases() - ContiguousCases->size(); + if (OtherDest == SI->getDefaultDest()) ++PreviousEdges; + for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) cast(BBI)->removeIncomingValue(SI->getParent()); } + + // Drop the switch. SI->eraseFromParent(); return true; } -/// EliminateDeadSwitchCases - Compute masked bits for the condition of a switch +/// Compute masked bits for the condition of a switch /// and use it to remove dead cases. -static bool EliminateDeadSwitchCases(SwitchInst *SI, const DataLayout *DL, - AssumptionTracker *AT) { +static bool EliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC, + const DataLayout &DL) { Value *Cond = SI->getCondition(); unsigned Bits = Cond->getType()->getIntegerBitWidth(); APInt KnownZero(Bits, 0), KnownOne(Bits, 0); - computeKnownBits(Cond, KnownZero, KnownOne, DL, 0, AT, SI); + computeKnownBits(Cond, KnownZero, KnownOne, DL, 0, AC, SI); // Gather dead cases. SmallVector DeadCases; @@ -3193,6 +3806,29 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI, const DataLayout *DL, } } + // If we can prove that the cases must cover all possible values, the + // default destination becomes dead and we can remove it. If we know some + // of the bits in the value, we can use that to more precisely compute the + // number of possible unique case values. 
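// A minimal sketch of the known-bits reasoning above, assuming the condition
// is at most 64 bits wide and that KnownZero/KnownOne masks have already been
// computed. A case value that sets a bit known to be zero, or clears a bit
// known to be one, is dead; and once the live cases cover every combination of
// the unknown bits, the default destination is unreachable.
#include <bitset>
#include <cstdint>

static bool caseIsDead(uint64_t CaseVal, uint64_t KnownZero, uint64_t KnownOne) {
  return (CaseVal & KnownZero) != 0 || (CaseVal & KnownOne) != KnownOne;
}

static bool defaultIsDead(unsigned Bits, uint64_t KnownZero, uint64_t KnownOne,
                          uint64_t NumLiveCases) {
  unsigned NumUnknownBits =
      Bits - static_cast<unsigned>(std::bitset<64>(KnownZero | KnownOne).count());
  return NumUnknownBits < 64 && NumLiveCases == (1ULL << NumUnknownBits);
}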
+ bool HasDefault = + !isa(SI->getDefaultDest()->getFirstNonPHIOrDbg()); + const unsigned NumUnknownBits = Bits - + (KnownZero.Or(KnownOne)).countPopulation(); + assert(NumUnknownBits <= Bits); + if (HasDefault && DeadCases.empty() && + NumUnknownBits < 64 /* avoid overflow */ && + SI->getNumCases() == (1ULL << NumUnknownBits)) { + DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n"); + BasicBlock *NewDefault = SplitBlockPredecessors(SI->getDefaultDest(), + SI->getParent(), ""); + SI->setDefaultDest(&*NewDefault); + SplitBlock(&*NewDefault, &NewDefault->front()); + auto *OldTI = NewDefault->getTerminator(); + new UnreachableInst(SI->getContext(), OldTI); + EraseTerminatorInstAndDCECond(OldTI); + return true; + } + SmallVector Weights; bool HasWeight = HasBranchWeights(SI); if (HasWeight) { @@ -3224,8 +3860,8 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI, const DataLayout *DL, return !DeadCases.empty(); } -/// FindPHIForConditionForwarding - If BB would be eligible for simplification -/// by TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated +/// If BB would be eligible for simplification by +/// TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated /// by an unconditional branch), look at the phi node for BB in the successor /// block and see if the incoming value is equal to CaseValue. If so, return /// the phi node, and set PhiIndex to BB's index in the phi node. @@ -3258,9 +3894,9 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue, return nullptr; } -/// ForwardSwitchConditionToPHI - Try to forward the condition of a switch -/// instruction to a phi node dominated by the switch, if that would mean that -/// some of the destination blocks of the switch can be folded away. +/// Try to forward the condition of a switch instruction to a phi node +/// dominated by the switch, if that would mean that some of the destination +/// blocks of the switch can be folded away. /// Returns true if a change is made. static bool ForwardSwitchConditionToPHI(SwitchInst *SI) { typedef DenseMap > ForwardingNodesMap; @@ -3295,7 +3931,7 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) { return Changed; } -/// ValidLookupTableConstant - Return true if the backend will be able to handle +/// Return true if the backend will be able to handle /// initializing an array of constants like C. static bool ValidLookupTableConstant(Constant *C) { if (C->isThreadDependent()) @@ -3313,7 +3949,7 @@ static bool ValidLookupTableConstant(Constant *C) { isa(C); } -/// LookupConstant - If V is a Constant, return it. Otherwise, try to look up +/// If V is a Constant, return it. Otherwise, try to look up /// its constant value in ConstantPool, returning 0 if it's not there. static Constant *LookupConstant(Value *V, const SmallDenseMap& ConstantPool) { @@ -3322,14 +3958,13 @@ static Constant *LookupConstant(Value *V, return ConstantPool.lookup(V); } -/// ConstantFold - Try to fold instruction I into a constant. This works for +/// Try to fold instruction I into a constant. This works for /// simple instructions such as binary operations where both operands are /// constant or can be replaced by constants from the ConstantPool. Returns the /// resulting constant on success, 0 otherwise. 
static Constant * -ConstantFold(Instruction *I, - const SmallDenseMap &ConstantPool, - const DataLayout *DL) { +ConstantFold(Instruction *I, const DataLayout &DL, + const SmallDenseMap &ConstantPool) { if (SelectInst *Select = dyn_cast(I)) { Constant *A = LookupConstant(Select->getCondition(), ConstantPool); if (!A) @@ -3349,24 +3984,23 @@ ConstantFold(Instruction *I, return nullptr; } - if (CmpInst *Cmp = dyn_cast(I)) + if (CmpInst *Cmp = dyn_cast(I)) { return ConstantFoldCompareInstOperands(Cmp->getPredicate(), COps[0], COps[1], DL); + } return ConstantFoldInstOperands(I->getOpcode(), I->getType(), COps, DL); } -/// GetCaseResults - Try to determine the resulting constant values in phi nodes +/// Try to determine the resulting constant values in phi nodes /// at the common destination basic block, *CommonDest, for one of the case /// destionations CaseDest corresponding to value CaseVal (0 for the default /// case), of a switch instruction SI. static bool -GetCaseResults(SwitchInst *SI, - ConstantInt *CaseVal, - BasicBlock *CaseDest, +GetCaseResults(SwitchInst *SI, ConstantInt *CaseVal, BasicBlock *CaseDest, BasicBlock **CommonDest, - SmallVectorImpl > &Res, - const DataLayout *DL) { + SmallVectorImpl> &Res, + const DataLayout &DL) { // The block from which we enter the common destination. BasicBlock *Pred = SI->getParent(); @@ -3385,9 +4019,24 @@ GetCaseResults(SwitchInst *SI, } else if (isa(I)) { // Skip debug intrinsic. continue; - } else if (Constant *C = ConstantFold(I, ConstantPool, DL)) { + } else if (Constant *C = ConstantFold(&*I, DL, ConstantPool)) { // Instruction is side-effect free and constant. - ConstantPool.insert(std::make_pair(I, C)); + + // If the instruction has uses outside this block or a phi node slot for + // the block, it is not safe to bypass the instruction since it would then + // no longer dominate all its uses. + for (auto &Use : I->uses()) { + User *User = Use.getUser(); + if (Instruction *I = dyn_cast(User)) + if (I->getParent() == CaseDest) + continue; + if (PHINode *Phi = dyn_cast(User)) + if (Phi->getIncomingBlock(Use) == CaseDest) + continue; + return false; + } + + ConstantPool.insert(std::make_pair(&*I, C)); } else { break; } @@ -3412,12 +4061,6 @@ GetCaseResults(SwitchInst *SI, if (!ConstVal) return false; - // Note: If the constant comes from constant-propagating the case value - // through the CaseDest basic block, it will be safe to remove the - // instructions in that block. They cannot be used (except in the phi nodes - // we visit) outside CaseDest, because that block does not dominate its - // successor. If it did, we would not be in this phi node. - // Be conservative about which kinds of constants we support. if (!ValidLookupTableConstant(ConstVal)) return false; @@ -3428,30 +4071,180 @@ GetCaseResults(SwitchInst *SI, return Res.size() > 0; } +// Helper function used to add CaseVal to the list of cases that generate +// Result. +static void MapCaseToResult(ConstantInt *CaseVal, + SwitchCaseResultVectorTy &UniqueResults, + Constant *Result) { + for (auto &I : UniqueResults) { + if (I.first == Result) { + I.second.push_back(CaseVal); + return; + } + } + UniqueResults.push_back(std::make_pair(Result, + SmallVector(1, CaseVal))); +} + +// Helper function that initializes a map containing +// results for the PHI node of the common destination block for a switch +// instruction. Returns false if multiple PHI nodes have been found or if +// there is not a common destination block for the switch. 
+static bool InitializeUniqueCases(SwitchInst *SI, PHINode *&PHI, + BasicBlock *&CommonDest, + SwitchCaseResultVectorTy &UniqueResults, + Constant *&DefaultResult, + const DataLayout &DL) { + for (auto &I : SI->cases()) { + ConstantInt *CaseVal = I.getCaseValue(); + + // Resulting value at phi nodes for this case value. + SwitchCaseResultsTy Results; + if (!GetCaseResults(SI, CaseVal, I.getCaseSuccessor(), &CommonDest, Results, + DL)) + return false; + + // Only one value per case is permitted + if (Results.size() > 1) + return false; + MapCaseToResult(CaseVal, UniqueResults, Results.begin()->second); + + // Check the PHI consistency. + if (!PHI) + PHI = Results[0].first; + else if (PHI != Results[0].first) + return false; + } + // Find the default result value. + SmallVector, 1> DefaultResults; + BasicBlock *DefaultDest = SI->getDefaultDest(); + GetCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest, DefaultResults, + DL); + // If the default value is not found abort unless the default destination + // is unreachable. + DefaultResult = + DefaultResults.size() == 1 ? DefaultResults.begin()->second : nullptr; + if ((!DefaultResult && + !isa(DefaultDest->getFirstNonPHIOrDbg()))) + return false; + + return true; +} + +// Helper function that checks if it is possible to transform a switch with only +// two cases (or two cases + default) that produces a result into a select. +// Example: +// switch (a) { +// case 10: %0 = icmp eq i32 %a, 10 +// return 10; %1 = select i1 %0, i32 10, i32 4 +// case 20: ----> %2 = icmp eq i32 %a, 20 +// return 2; %3 = select i1 %2, i32 2, i32 %1 +// default: +// return 4; +// } +static Value * +ConvertTwoCaseSwitch(const SwitchCaseResultVectorTy &ResultVector, + Constant *DefaultResult, Value *Condition, + IRBuilder<> &Builder) { + assert(ResultVector.size() == 2 && + "We should have exactly two unique results at this point"); + // If we are selecting between only two cases transform into a simple + // select or a two-way select if default is possible. + if (ResultVector[0].second.size() == 1 && + ResultVector[1].second.size() == 1) { + ConstantInt *const FirstCase = ResultVector[0].second[0]; + ConstantInt *const SecondCase = ResultVector[1].second[0]; + + bool DefaultCanTrigger = DefaultResult; + Value *SelectValue = ResultVector[1].first; + if (DefaultCanTrigger) { + Value *const ValueCompare = + Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp"); + SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first, + DefaultResult, "switch.select"); + } + Value *const ValueCompare = + Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp"); + return Builder.CreateSelect(ValueCompare, ResultVector[0].first, SelectValue, + "switch.select"); + } + + return nullptr; +} + +// Helper function to cleanup a switch instruction that has been converted into +// a select, fixing up PHI nodes and basic blocks. +static void RemoveSwitchAfterSelectConversion(SwitchInst *SI, PHINode *PHI, + Value *SelectValue, + IRBuilder<> &Builder) { + BasicBlock *SelectBB = SI->getParent(); + while (PHI->getBasicBlockIndex(SelectBB) >= 0) + PHI->removeIncomingValue(SelectBB); + PHI->addIncoming(SelectValue, SelectBB); + + Builder.CreateBr(PHI->getParent()); + + // Remove the switch. 
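// A source-level sketch of the two-case switch-to-select conversion, reusing
// the illustrative constants from the comment above (10, 20, 2 and 4). The two
// equality tests become nested selects: the inner one chooses between the
// second case's result and the default, the outer one between the first case's
// result and that.
static int beforeTwoCase(int a) {
  switch (a) {
  case 10: return 10;
  case 20: return 2;
  default: return 4;
  }
}

static int afterTwoCase(int a) {
  int inner = (a == 20) ? 2 : 4;   // "switch.selectcmp" / "switch.select"
  return (a == 10) ? 10 : inner;   // the final select replacing the switch
}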
+ for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) { + BasicBlock *Succ = SI->getSuccessor(i); + + if (Succ == PHI->getParent()) + continue; + Succ->removePredecessor(SelectBB); + } + SI->eraseFromParent(); +} + +/// If the switch is only used to initialize one or more +/// phi nodes in a common successor block with only two different +/// constant values, replace the switch with select. +static bool SwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder, + AssumptionCache *AC, const DataLayout &DL) { + Value *const Cond = SI->getCondition(); + PHINode *PHI = nullptr; + BasicBlock *CommonDest = nullptr; + Constant *DefaultResult; + SwitchCaseResultVectorTy UniqueResults; + // Collect all the cases that will deliver the same value from the switch. + if (!InitializeUniqueCases(SI, PHI, CommonDest, UniqueResults, DefaultResult, + DL)) + return false; + // Selects choose between maximum two values. + if (UniqueResults.size() != 2) + return false; + assert(PHI != nullptr && "PHI for value select not found"); + + Builder.SetInsertPoint(SI); + Value *SelectValue = ConvertTwoCaseSwitch( + UniqueResults, + DefaultResult, Cond, Builder); + if (SelectValue) { + RemoveSwitchAfterSelectConversion(SI, PHI, SelectValue, Builder); + return true; + } + // The switch couldn't be converted into a select. + return false; +} + namespace { - /// SwitchLookupTable - This class represents a lookup table that can be used - /// to replace a switch. + /// This class represents a lookup table that can be used to replace a switch. class SwitchLookupTable { public: - /// SwitchLookupTable - Create a lookup table to use as a switch replacement - /// with the contents of Values, using DefaultValue to fill any holes in the - /// table. - SwitchLookupTable(Module &M, - uint64_t TableSize, - ConstantInt *Offset, - const SmallVectorImpl >& Values, - Constant *DefaultValue, - const DataLayout *DL); - - /// BuildLookup - Build instructions with Builder to retrieve the value at + /// Create a lookup table to use as a switch replacement with the contents + /// of Values, using DefaultValue to fill any holes in the table. + SwitchLookupTable( + Module &M, uint64_t TableSize, ConstantInt *Offset, + const SmallVectorImpl> &Values, + Constant *DefaultValue, const DataLayout &DL); + + /// Build instructions with Builder to retrieve the value at /// the position given by Index in the lookup table. Value *BuildLookup(Value *Index, IRBuilder<> &Builder); - /// WouldFitInRegister - Return true if a table with TableSize elements of + /// Return true if a table with TableSize elements of /// type ElementType would fit in a target-legal register. - static bool WouldFitInRegister(const DataLayout *DL, - uint64_t TableSize, - const Type *ElementType); + static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize, + Type *ElementType); private: // Depending on the contents of the table, it can be represented in @@ -3461,6 +4254,11 @@ namespace { // store that single value and return it for each lookup. SingleValueKind, + // For tables where there is a linear relationship between table index + // and values. We calculate the result with a simple multiplication + // and addition instead of a table lookup. + LinearMapKind, + // For small tables with integer elements, we can pack them into a bitmap // that fits into a target-legal register. Values are retrieved by // shift and mask operations. 
@@ -3478,19 +4276,21 @@ namespace { ConstantInt *BitMap; IntegerType *BitMapElementTy; + // For LinearMapKind, these are the constants used to derive the value. + ConstantInt *LinearOffset; + ConstantInt *LinearMultiplier; + // For ArrayKind, this is the array. GlobalVariable *Array; }; } -SwitchLookupTable::SwitchLookupTable(Module &M, - uint64_t TableSize, - ConstantInt *Offset, - const SmallVectorImpl >& Values, - Constant *DefaultValue, - const DataLayout *DL) +SwitchLookupTable::SwitchLookupTable( + Module &M, uint64_t TableSize, ConstantInt *Offset, + const SmallVectorImpl> &Values, + Constant *DefaultValue, const DataLayout &DL) : SingleValue(nullptr), BitMap(nullptr), BitMapElementTy(nullptr), - Array(nullptr) { + LinearOffset(nullptr), LinearMultiplier(nullptr), Array(nullptr) { assert(Values.size() && "Can't build lookup table without values!"); assert(TableSize >= Values.size() && "Can't fit values in table!"); @@ -3535,6 +4335,43 @@ SwitchLookupTable::SwitchLookupTable(Module &M, return; } + // Check if we can derive the value with a linear transformation from the + // table index. + if (isa(ValueType)) { + bool LinearMappingPossible = true; + APInt PrevVal; + APInt DistToPrev; + assert(TableSize >= 2 && "Should be a SingleValue table."); + // Check if there is the same distance between two consecutive values. + for (uint64_t I = 0; I < TableSize; ++I) { + ConstantInt *ConstVal = dyn_cast(TableContents[I]); + if (!ConstVal) { + // This is an undef. We could deal with it, but undefs in lookup tables + // are very seldom. It's probably not worth the additional complexity. + LinearMappingPossible = false; + break; + } + APInt Val = ConstVal->getValue(); + if (I != 0) { + APInt Dist = Val - PrevVal; + if (I == 1) { + DistToPrev = Dist; + } else if (Dist != DistToPrev) { + LinearMappingPossible = false; + break; + } + } + PrevVal = Val; + } + if (LinearMappingPossible) { + LinearOffset = cast(TableContents[0]); + LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev); + Kind = LinearMapKind; + ++NumLinearMaps; + return; + } + } + // If the type is integer and the table fits in a register, build a bitmap. if (WouldFitInRegister(DL, TableSize, ValueType)) { IntegerType *IT = cast(ValueType); @@ -3570,6 +4407,16 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) { switch (Kind) { case SingleValueKind: return SingleValue; + case LinearMapKind: { + // Derive the result value from the input value. + Value *Result = Builder.CreateIntCast(Index, LinearMultiplier->getType(), + false, "switch.idx.cast"); + if (!LinearMultiplier->isOne()) + Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult"); + if (!LinearOffset->isZero()) + Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset"); + return Result; + } case BitMapKind: { // Type of the bitmap (e.g. i59). 
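// A minimal sketch of the LinearMapKind strategy above, assuming plain int64_t
// table contents. When consecutive entries differ by a constant stride, no
// table is materialized at all: the result is just offset + index * stride,
// which is what BuildLookup emits for this kind.
#include <cstdint>
#include <vector>

struct LinearMap { bool Valid; int64_t Offset, Multiplier; };

static LinearMap detectLinearMap(const std::vector<int64_t> &Table) {
  LinearMap M = {false, 0, 0};
  if (Table.size() < 2)
    return M;
  int64_t Stride = Table[1] - Table[0];
  for (size_t I = 2, E = Table.size(); I != E; ++I)
    if (Table[I] - Table[I - 1] != Stride)
      return M;                    // not a constant distance between values
  return LinearMap{true, Table[0], Stride};
}

static int64_t linearLookup(const LinearMap &M, uint64_t Index) {
  return M.Offset + static_cast<int64_t>(Index) * M.Multiplier;
}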
IntegerType *MapTy = BitMap->getType(); @@ -3603,20 +4450,18 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) { "switch.tableidx.zext"); Value *GEPIndices[] = { Builder.getInt32(0), Index }; - Value *GEP = Builder.CreateInBoundsGEP(Array, GEPIndices, - "switch.gep"); + Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array, + GEPIndices, "switch.gep"); return Builder.CreateLoad(GEP, "switch.load"); } } llvm_unreachable("Unknown lookup table kind!"); } -bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL, +bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL, uint64_t TableSize, - const Type *ElementType) { - if (!DL) - return false; - const IntegerType *IT = dyn_cast(ElementType); + Type *ElementType) { + auto *IT = dyn_cast(ElementType); if (!IT) return false; // FIXME: If the type is wider than it needs to be, e.g. i8 but all values @@ -3625,25 +4470,22 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL, // Avoid overflow, fitsInLegalInteger uses unsigned int for the width. if (TableSize >= UINT_MAX/IT->getBitWidth()) return false; - return DL->fitsInLegalInteger(TableSize * IT->getBitWidth()); + return DL.fitsInLegalInteger(TableSize * IT->getBitWidth()); } -/// ShouldBuildLookupTable - Determine whether a lookup table should be built -/// for this switch, based on the number of cases, size of the table and the -/// types of the results. -static bool ShouldBuildLookupTable(SwitchInst *SI, - uint64_t TableSize, - const TargetTransformInfo &TTI, - const DataLayout *DL, - const SmallDenseMap& ResultTypes) { +/// Determine whether a lookup table should be built for this switch, based on +/// the number of cases, size of the table, and the types of the results. +static bool +ShouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize, + const TargetTransformInfo &TTI, const DataLayout &DL, + const SmallDenseMap &ResultTypes) { if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10) return false; // TableSize overflowed, or mul below might overflow. bool AllTablesFitInRegister = true; bool HasIllegalType = false; - for (SmallDenseMap::const_iterator I = ResultTypes.begin(), - E = ResultTypes.end(); I != E; ++I) { - Type *Ty = I->second; + for (const auto &I : ResultTypes) { + Type *Ty = I.second; // Saturate this flag to true. HasIllegalType = HasIllegalType || !TTI.isTypeLegal(Ty); @@ -3673,13 +4515,95 @@ static bool ShouldBuildLookupTable(SwitchInst *SI, return SI->getNumCases() * 10 >= TableSize * 4; } -/// SwitchToLookupTable - If the switch is only used to initialize one or more -/// phi nodes in a common successor block with different constant values, -/// replace the switch with lookup tables. -static bool SwitchToLookupTable(SwitchInst *SI, - IRBuilder<> &Builder, - const TargetTransformInfo &TTI, - const DataLayout* DL) { +/// Try to reuse the switch table index compare. Following pattern: +/// \code +/// if (idx < tablesize) +/// r = table[idx]; // table does not contain default_value +/// else +/// r = default_value; +/// if (r != default_value) +/// ... +/// \endcode +/// Is optimized to: +/// \code +/// cond = idx < tablesize; +/// if (cond) +/// r = table[idx]; +/// else +/// r = default_value; +/// if (cond) +/// ... +/// \endcode +/// Jump threading will then eliminate the second if(cond). 
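// Illustrative sketch (not from the patch): what the BitMapKind lowering above
// amounts to. Four i8 results are packed into one 32-bit constant, so
// WouldFitInRegister(DL, 4, i8) holds on a target with a legal i32, and the
// lookup is a shift by (index * element width) followed by a truncation.
#include <cstdint>

uint8_t bitmapLookup(uint32_t index) {     // index assumed already range-checked (< 4)
  const uint32_t BitMap = 0x44332211u;     // table {0x11, 0x22, 0x33, 0x44}, element 0 in the low bits
  const unsigned ElementWidth = 8;         // BitMapElementTy is i8 here
  return static_cast<uint8_t>(BitMap >> (index * ElementWidth));  // shift + trunc
}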
+static void reuseTableCompare(User *PhiUser, BasicBlock *PhiBlock, + BranchInst *RangeCheckBranch, Constant *DefaultValue, + const SmallVectorImpl >& Values) { + + ICmpInst *CmpInst = dyn_cast(PhiUser); + if (!CmpInst) + return; + + // We require that the compare is in the same block as the phi so that jump + // threading can do its work afterwards. + if (CmpInst->getParent() != PhiBlock) + return; + + Constant *CmpOp1 = dyn_cast(CmpInst->getOperand(1)); + if (!CmpOp1) + return; + + Value *RangeCmp = RangeCheckBranch->getCondition(); + Constant *TrueConst = ConstantInt::getTrue(RangeCmp->getType()); + Constant *FalseConst = ConstantInt::getFalse(RangeCmp->getType()); + + // Check if the compare with the default value is constant true or false. + Constant *DefaultConst = ConstantExpr::getICmp(CmpInst->getPredicate(), + DefaultValue, CmpOp1, true); + if (DefaultConst != TrueConst && DefaultConst != FalseConst) + return; + + // Check if the compare with the case values is distinct from the default + // compare result. + for (auto ValuePair : Values) { + Constant *CaseConst = ConstantExpr::getICmp(CmpInst->getPredicate(), + ValuePair.second, CmpOp1, true); + if (!CaseConst || CaseConst == DefaultConst) + return; + assert((CaseConst == TrueConst || CaseConst == FalseConst) && + "Expect true or false as compare result."); + } + + // Check if the branch instruction dominates the phi node. It's a simple + // dominance check, but sufficient for our needs. + // Although this check is invariant in the calling loops, it's better to do it + // at this late stage. Practically we do it at most once for a switch. + BasicBlock *BranchBlock = RangeCheckBranch->getParent(); + for (auto PI = pred_begin(PhiBlock), E = pred_end(PhiBlock); PI != E; ++PI) { + BasicBlock *Pred = *PI; + if (Pred != BranchBlock && Pred->getUniquePredecessor() != BranchBlock) + return; + } + + if (DefaultConst == FalseConst) { + // The compare yields the same result. We can replace it. + CmpInst->replaceAllUsesWith(RangeCmp); + ++NumTableCmpReuses; + } else { + // The compare yields the same result, just inverted. We can replace it. + Value *InvertedTableCmp = BinaryOperator::CreateXor(RangeCmp, + ConstantInt::get(RangeCmp->getType(), 1), "inverted.cmp", + RangeCheckBranch); + CmpInst->replaceAllUsesWith(InvertedTableCmp); + ++NumTableCmpReuses; + } +} + +/// If the switch is only used to initialize one or more phi nodes in a common +/// successor block with different constant values, replace the switch with +/// lookup tables. +static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder, + const DataLayout &DL, + const TargetTransformInfo &TTI) { assert(SI->getNumCases() > 1 && "Degenerate switch?"); // Only build lookup table when we have a target that supports it. @@ -3699,7 +4623,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, return false; // Figure out the corresponding result for each case value and phi node in the - // common destination, as well as the the min and max case values. + // common destination, as well as the min and max case values. assert(SI->case_begin() != SI->case_end()); SwitchInst::CaseIt CI = SI->case_begin(); ConstantInt *MinCaseVal = CI.getCaseValue(); @@ -3727,16 +4651,17 @@ static bool SwitchToLookupTable(SwitchInst *SI, return false; // Append the result from this case to the list for each phi. 
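// Illustrative sketch (not from the patch): the situation reuseTableCompare above
// recognizes, written at the source level. kTable/kDefault and the function names
// are invented. Because no table entry ever equals kDefault, "r != kDefault" is
// exactly the range check, so the compare can be replaced by the RangeCheckBranch
// condition and jump threading cleans up the second branch.
#include <cstdint>

static const int kTable[3] = {10, 20, 30};
static const int kDefault = 0;

bool beforeReuse(uint32_t idx) {
  int r = (idx < 3) ? kTable[idx] : kDefault;
  return r != kDefault;   // separate compare against the default value
}

bool afterReuse(uint32_t idx) {
  return idx < 3;         // the table-range compare is reused directly
}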
- for (ResultsTy::iterator I = Results.begin(), E = Results.end(); I!=E; ++I) {
- if (!ResultLists.count(I->first))
- PHIs.push_back(I->first);
- ResultLists[I->first].push_back(std::make_pair(CaseVal, I->second));
+ for (const auto &I : Results) {
+ PHINode *PHI = I.first;
+ Constant *Value = I.second;
+ if (!ResultLists.count(PHI))
+ PHIs.push_back(PHI);
+ ResultLists[PHI].push_back(std::make_pair(CaseVal, Value));
 }
 }
 // Keep track of the result types.
- for (size_t I = 0, E = PHIs.size(); I != E; ++I) {
- PHINode *PHI = PHIs[I];
+ for (PHINode *PHI : PHIs) {
 ResultTypes[PHI] = ResultLists[PHI][0].second->getType();
 }
@@ -3748,23 +4673,21 @@ static bool SwitchToLookupTable(SwitchInst *SI,
 // If the table has holes, we need a constant result for the default case
 // or a bitmask that fits in a register.
 SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
- bool HasDefaultResults = false;
- if (TableHasHoles) {
- HasDefaultResults = GetCaseResults(SI, nullptr, SI->getDefaultDest(),
- &CommonDest, DefaultResultsList, DL);
- }
+ bool HasDefaultResults = GetCaseResults(SI, nullptr, SI->getDefaultDest(),
+ &CommonDest, DefaultResultsList, DL);
+
 bool NeedMask = (TableHasHoles && !HasDefaultResults);
 if (NeedMask) {
 // As an extra penalty for the validity test we require more cases.
 if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark).
 return false;
- if (!(DL && DL->fitsInLegalInteger(TableSize)))
+ if (!DL.fitsInLegalInteger(TableSize))
 return false;
 }
- for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {
- PHINode *PHI = DefaultResultsList[I].first;
- Constant *Result = DefaultResultsList[I].second;
+ for (const auto &I : DefaultResultsList) {
+ PHINode *PHI = I.first;
+ Constant *Result = I.second;
 DefaultResults[PHI] = Result;
 }
@@ -3791,21 +4714,22 @@ static bool SwitchToLookupTable(SwitchInst *SI,
 "It is impossible for a switch to have more entries than the max "
 "representable value of its input integer type's size.");
- // If we have a fully covered lookup table, unconditionally branch to the
- // lookup table BB. Otherwise, check if the condition value is within the case
- // range. If it is so, branch to the new BB. Otherwise branch to SI's default
- // destination.
- const bool GeneratingCoveredLookupTable = MaxTableSize == TableSize;
- if (GeneratingCoveredLookupTable) {
+ // If the default destination is unreachable, or if the lookup table covers
+ // all values of the conditional variable, branch directly to the lookup table
+ // BB. Otherwise, check that the condition is within the case range.
+ const bool DefaultIsReachable =
+ !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
+ const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize);
+ BranchInst *RangeCheckBranch = nullptr;
+
+ if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
 Builder.CreateBr(LookupBB);
- // We cached PHINodes in PHIs, to avoid accessing deleted PHINodes later,
- // do not delete PHINodes here.
- SI->getDefaultDest()->removePredecessor(SI->getParent(),
- true/*DontDeleteUselessPHIs*/);
+ // Note: We call removePredecessor later since we need to be able to get the
+ // PHI value for the default case in case we're using a bit mask.
 } else {
 Value *Cmp = Builder.CreateICmpULT(TableIndex, ConstantInt::get(
 MinCaseVal->getType(), TableSize));
+ RangeCheckBranch = Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
- Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
 }
 // Populate the BB that does the lookups.
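// Illustrative sketch (not from the patch): the guard the code above emits. When
// the default block is unreachable or the table covers every value of the
// condition, the branch to LookupBB is unconditional; otherwise an unsigned
// "index < TableSize" compare (the RangeCheckBranch) protects the table access.
// Names are invented for this sketch.
#include <cstdint>

int guardedLookup(uint32_t x) {
  static const int Table[5] = {3, 1, 4, 1, 5};
  const uint32_t TableSize = 5;
  if (x < TableSize)   // RangeCheckBranch equivalent
    return Table[x];   // LookupBB
  return -1;           // SI's original default destination
}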
@@ -3822,9 +4746,12 @@ static bool SwitchToLookupTable(SwitchInst *SI, CommonDest->getParent(), CommonDest); + // Make the mask's bitwidth at least 8bit and a power-of-2 to avoid + // unnecessary illegal types. + uint64_t TableSizePowOf2 = NextPowerOf2(std::max(7ULL, TableSize - 1ULL)); + APInt MaskInt(TableSizePowOf2, 0); + APInt One(TableSizePowOf2, 1); // Build bitmask; fill in a 1 bit for every case. - APInt MaskInt(TableSize, 0); - APInt One(TableSize, 1); const ResultListTy &ResultList = ResultLists[PHIs[0]]; for (size_t I = 0, E = ResultList.size(); I != E; ++I) { uint64_t Idx = (ResultList[I].first->getValue() - @@ -3850,14 +4777,21 @@ static bool SwitchToLookupTable(SwitchInst *SI, AddPredecessorToBlock(SI->getDefaultDest(), MaskBB, SI->getParent()); } + if (!DefaultIsReachable || GeneratingCoveredLookupTable) { + // We cached PHINodes in PHIs, to avoid accessing deleted PHINodes later, + // do not delete PHINodes here. + SI->getDefaultDest()->removePredecessor(SI->getParent(), + /*DontDeleteUselessPHIs=*/true); + } + bool ReturnedEarly = false; for (size_t I = 0, E = PHIs.size(); I != E; ++I) { PHINode *PHI = PHIs[I]; + const ResultListTy &ResultList = ResultLists[PHI]; // If using a bitmask, use any value to fill the lookup table holes. Constant *DV = NeedMask ? ResultLists[PHI][0].second : DefaultResults[PHI]; - SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI], - DV, DL); + SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultList, DV, DL); Value *Result = Table.BuildLookup(TableIndex, Builder); @@ -3870,6 +4804,16 @@ static bool SwitchToLookupTable(SwitchInst *SI, break; } + // Do a small peephole optimization: re-use the switch table compare if + // possible. + if (!TableHasHoles && HasDefaultResults && RangeCheckBranch) { + BasicBlock *PhiBlock = PHI->getParent(); + // Search for compare instructions which use the phi. + for (auto *User : PHI->users()) { + reuseTableCompare(User, PhiBlock, RangeCheckBranch, DV, ResultList); + } + } + PHI->addIncoming(Result, LookupBB); } @@ -3900,12 +4844,12 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { // see if that predecessor totally determines the outcome of this switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; Value *Cond = SI->getCondition(); if (SelectInst *Select = dyn_cast(Cond)) if (SimplifySwitchOnSelect(SI, Select)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; // If the block only contains the switch, see if we can fold the block // away into any preds. @@ -3915,22 +4859,25 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { ++BBI; if (SI == &*BBI) if (FoldValueComparisonIntoPredecessors(SI, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } // Try to transform the switch into an icmp and a branch. if (TurnSwitchRangeIntoICmp(SI, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; // Remove unreachable cases. 
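// Illustrative sketch (not from the patch): the hole bitmask built above. Cases
// exist for indices 0, 1, 3 and 5; indices 2 and 4 are holes that fall through to
// the default. The mask is widened to a power of two of at least 8 bits, and a
// single shift+and (the MaskBB check) picks between table and default.
#include <cstdint>

int maskedLookup(uint32_t idx) {              // idx assumed already range-checked (< 6)
  static const int Table[6] = {10, 11, 0 /*hole*/, 13, 0 /*hole*/, 15};
  const uint8_t HoleMask = 0x2B;              // 0b00101011: bit i set <=> case i exists
  if ((HoleMask >> idx) & 1u)                 // MaskBB equivalent
    return Table[idx];                        // real table entry
  return -1;                                  // default result for the holes
}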
- if (EliminateDeadSwitchCases(SI, DL, AT))
- return SimplifyCFG(BB, TTI, DL, AT) | true;
+ if (EliminateDeadSwitchCases(SI, AC, DL))
+ return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
+
+ if (SwitchToSelect(SI, Builder, AC, DL))
+ return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
 if (ForwardSwitchConditionToPHI(SI))
- return SimplifyCFG(BB, TTI, DL, AT) | true;
+ return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
- if (SwitchToLookupTable(SI, Builder, TTI, DL))
- return SimplifyCFG(BB, TTI, DL, AT) | true;
+ if (SwitchToLookupTable(SI, Builder, DL, TTI))
+ return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
 return false;
 }
@@ -3943,7 +4890,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
 SmallPtrSet<Value *, 8> Succs;
 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
 BasicBlock *Dest = IBI->getDestination(i);
- if (!Dest->hasAddressTaken() || !Succs.insert(Dest)) {
+ if (!Dest->hasAddressTaken() || !Succs.insert(Dest).second) {
 Dest->removePredecessor(BB);
 IBI->removeDestination(i);
 --i; --e;
@@ -3967,11 +4914,87 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
 if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
 if (SimplifyIndirectBrOnSelect(IBI, SI))
- return SimplifyCFG(BB, TTI, DL, AT) | true;
+ return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
 }
 return Changed;
 }
+/// Given a block with only a single landing pad and an unconditional branch,
+/// try to find another basic block which this one can be merged with. This
+/// handles cases where we have multiple invokes with unique landing pads, but
+/// a shared handler.
+///
+/// We specifically choose to not worry about merging non-empty blocks
+/// here. That is a PRE/scheduling problem and is best solved elsewhere. In
+/// practice, the optimizer produces empty landing pad blocks quite frequently
+/// when dealing with exception dense code. (see: instcombine, gvn, if-else
+/// sinking in this file)
+///
+/// This is primarily a code size optimization. We need to avoid performing
+/// any transform which might inhibit optimization (such as our ability to
+/// specialize a particular handler via tail commoning). We do this by not
+/// merging any blocks which require us to introduce a phi. Since the same
+/// values are flowing through both blocks, we don't lose any ability to
+/// specialize. If anything, we make such specialization more likely.
+///
+/// TODO - This transformation could remove entries from a phi in the target
+/// block when the inputs in the phi are the same for the two blocks being
+/// merged. In some cases, this could result in removal of the PHI entirely.
+static bool TryToMergeLandingPad(LandingPadInst *LPad, BranchInst *BI,
+ BasicBlock *BB) {
+ auto Succ = BB->getUniqueSuccessor();
+ assert(Succ);
+ // If there's a phi in the successor block, we'd likely have to introduce
+ // a phi into the merged landing pad block.
+ if (isa<PHINode>(*Succ->begin()))
+ return false;
+
+ for (BasicBlock *OtherPred : predecessors(Succ)) {
+ if (BB == OtherPred)
+ continue;
+ BasicBlock::iterator I = OtherPred->begin();
+ LandingPadInst *LPad2 = dyn_cast<LandingPadInst>(I);
+ if (!LPad2 || !LPad2->isIdenticalTo(LPad))
+ continue;
+ for (++I; isa<DbgInfoIntrinsic>(I); ++I) {}
+ BranchInst *BI2 = dyn_cast<BranchInst>(I);
+ if (!BI2 || !BI2->isIdenticalTo(BI))
+ continue;
+
+ // We've found an identical block. Update our predecessors to take that
+ // path instead and make ourselves dead.
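// Illustrative sketch (not from the patch): the kind of C++ that tends to leave
// several invokes whose landing pads are empty and byte-for-byte identical, all
// branching to one shared cleanup block. When earlier passes leave the pads in
// that shape, TryToMergeLandingPad redirects the unwind edges of BB's invoke
// predecessors at the surviving copy (the loop below) and replaces BB's branch
// with unreachable. Guard/callee names are invented.
struct Guard { ~Guard(); };
void callee1();
void callee2();

void caller() {
  Guard g;       // both calls become invokes that unwind into equivalent
  callee1();     // cleanup landing pads ending in a branch to g's destructor
  callee2();
}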
+ SmallSet Preds; + Preds.insert(pred_begin(BB), pred_end(BB)); + for (BasicBlock *Pred : Preds) { + InvokeInst *II = cast(Pred->getTerminator()); + assert(II->getNormalDest() != BB && + II->getUnwindDest() == BB && "unexpected successor"); + II->setUnwindDest(OtherPred); + } + + // The debug info in OtherPred doesn't cover the merged control flow that + // used to go through BB. We need to delete it or update it. + for (auto I = OtherPred->begin(), E = OtherPred->end(); + I != E;) { + Instruction &Inst = *I; I++; + if (isa(Inst)) + Inst.eraseFromParent(); + } + + SmallSet Succs; + Succs.insert(succ_begin(BB), succ_end(BB)); + for (BasicBlock *Succ : Succs) { + Succ->removePredecessor(BB); + } + + IRBuilder<> Builder(BI); + Builder.CreateUnreachable(); + BI->eraseFromParent(); + return true; + } + return false; +} + bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ BasicBlock *BB = BI->getParent(); @@ -3979,7 +5002,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ return true; // If the Terminator is the only non-phi instruction, simplify the block. - BasicBlock::iterator I = BB->getFirstNonPHIOrDbg(); + BasicBlock::iterator I = BB->getFirstNonPHIOrDbg()->getIterator(); if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() && TryToSimplifyUncondBranchFromEmptyBlock(BB)) return true; @@ -3991,19 +5014,39 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ for (++I; isa(I); ++I) ; if (I->isTerminator() && - TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL, AT)) + TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, DL, TTI, + BonusInstThreshold, AC)) return true; } + // See if we can merge an empty landing pad block with another which is + // equivalent. + if (LandingPadInst *LPad = dyn_cast(I)) { + for (++I; isa(I); ++I) {} + if (I->isTerminator() && + TryToMergeLandingPad(LPad, BI, BB)) + return true; + } + // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and our successor, fold the comparison into the // predecessor and use logical operations to update the incoming value // for PHI nodes in common successor. - if (FoldBranchToCommonDest(BI, DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + if (FoldBranchToCommonDest(BI, BonusInstThreshold)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; return false; } +static BasicBlock *allPredecessorsComeFromSameSource(BasicBlock *BB) { + BasicBlock *PredPred = nullptr; + for (auto *P : predecessors(BB)) { + BasicBlock *PPred = P->getSinglePredecessor(); + if (!PPred || (PredPred && PredPred != PPred)) + return nullptr; + PredPred = PPred; + } + return PredPred; +} bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { BasicBlock *BB = BI->getParent(); @@ -4015,7 +5058,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { // switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; // This block must be empty, except for the setcond inst, if it exists. // Ignore dbg intrinsics. 
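// Illustrative sketch (not from the patch): the shape FoldBranchToCommonDest,
// used just above, targets. BB holds only a compare and a conditional branch,
// and a predecessor branches both to BB and to BB's successor, so the two
// conditions are combined in the predecessor and BB goes away.
// BonusInstThreshold bounds how many extra "bonus" instructions may be copied
// into the predecessor to make that legal. Names are invented.
void sink();

void beforeFold(int a, int b) {
  if (a != 0)
    if (b < 42)              // lone compare-and-branch block
      sink();
}

void afterFold(int a, int b) {
  if ((a != 0) & (b < 42))   // one combined, non-short-circuiting branch
    sink();
}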
@@ -4025,26 +5068,26 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { ++I; if (&*I == BI) { if (FoldValueComparisonIntoPredecessors(BI, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } else if (&*I == cast(BI->getCondition())){ ++I; // Ignore dbg intrinsics. while (isa(I)) ++I; if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } } // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction. - if (SimplifyBranchOnICmpChain(BI, DL, Builder)) + if (SimplifyBranchOnICmpChain(BI, Builder, DL)) return true; // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and one of our successors, fold the comparison into the // predecessor and use logical operations to pick the right destination. - if (FoldBranchToCommonDest(BI, DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + if (FoldBranchToCommonDest(BI, BonusInstThreshold)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; // We have a conditional branch to two blocks that are only reachable // from BI. We know that the condbr dominates the two blocks, so see if @@ -4052,16 +5095,16 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { // can hoist it up to the branching block. if (BI->getSuccessor(0)->getSinglePredecessor()) { if (BI->getSuccessor(1)->getSinglePredecessor()) { - if (HoistThenElseCodeToIf(BI, DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + if (HoistThenElseCodeToIf(BI, TTI)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } else { // If Successor #1 has multiple preds, we may be able to conditionally // execute Successor #0 if it branches to Successor #1. TerminatorInst *Succ0TI = BI->getSuccessor(0)->getTerminator(); if (Succ0TI->getNumSuccessors() == 1 && Succ0TI->getSuccessor(0) == BI->getSuccessor(1)) - if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), TTI)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } } else if (BI->getSuccessor(1)->getSinglePredecessor()) { // If Successor #0 has multiple preds, we may be able to conditionally @@ -4069,8 +5112,8 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { TerminatorInst *Succ1TI = BI->getSuccessor(1)->getTerminator(); if (Succ1TI->getNumSuccessors() == 1 && Succ1TI->getSuccessor(0) == BI->getSuccessor(0)) - if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), TTI)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; } // If this is a branch on a phi node in the current block, thread control @@ -4078,15 +5121,23 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (PHINode *PN = dyn_cast(BI->getCondition())) if (PN->getParent() == BI->getParent()) if (FoldCondBranchOnPHI(BI, DL)) - return SimplifyCFG(BB, TTI, DL, AT) | true; + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; // Scan predecessor blocks for conditional branches. 
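// Illustrative sketch (not from the patch): what SpeculativelyExecuteBB, used
// above for the one-sided-diamond cases, does at the source level. A cheap,
// side-effect-free instruction is hoisted out of the conditional block and a
// select picks the result, so the branch disappears. Names are invented.
int beforeSpeculate(int x, bool c) {
  int y = 0;
  if (c)
    y = x + 1;       // single cheap, safely speculatable instruction
  return y;
}

int afterSpeculate(int x, bool c) {
  int t = x + 1;     // executed unconditionally (speculated)
  return c ? t : 0;  // select replaces the conditional branch
}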
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) if (BranchInst *PBI = dyn_cast((*PI)->getTerminator())) if (PBI != BI && PBI->isConditional()) - if (SimplifyCondBranchToCondBranch(PBI, BI)) - return SimplifyCFG(BB, TTI, DL, AT) | true; - + if (SimplifyCondBranchToCondBranch(PBI, BI, DL)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; + + // Look for diamond patterns. + if (MergeCondStores) + if (BasicBlock *PrevBB = allPredecessorsComeFromSameSource(BB)) + if (BranchInst *PBI = dyn_cast(PrevBB->getTerminator())) + if (PBI != BI && PBI->isConditional()) + if (mergeConditionalStores(PBI, BI)) + return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true; + return false; } @@ -4166,7 +5217,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { // Remove basic blocks that have no predecessors (except the entry block)... // or that just have themself as a predecessor. These are unreachable. - if ((pred_begin(BB) == pred_end(BB) && + if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) || BB->getSinglePredecessor() == BB) { DEBUG(dbgs() << "Removing BB: \n" << *BB); @@ -4197,7 +5248,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { // eliminate it, do so now. if (PHINode *PN = dyn_cast(BB->begin())) if (PN->getNumIncomingValues() == 2) - Changed |= FoldTwoEntryPHINode(PN, DL); + Changed |= FoldTwoEntryPHINode(PN, TTI, DL); Builder.SetInsertPoint(BB->getTerminator()); if (BranchInst *BI = dyn_cast(BB->getTerminator())) { @@ -4210,6 +5261,9 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { if (SimplifyReturn(RI, Builder)) return true; } else if (ResumeInst *RI = dyn_cast(BB->getTerminator())) { if (SimplifyResume(RI, Builder)) return true; + } else if (CleanupReturnInst *RI = + dyn_cast(BB->getTerminator())) { + if (SimplifyCleanupReturn(RI)) return true; } else if (SwitchInst *SI = dyn_cast(BB->getTerminator())) { if (SimplifySwitch(SI, Builder)) return true; } else if (UnreachableInst *UI = @@ -4223,12 +5277,13 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { return Changed; } -/// SimplifyCFG - This function is used to do simplification of a CFG. For -/// example, it adjusts branches to branches to eliminate the extra hop, it +/// This function is used to do simplification of a CFG. +/// For example, it adjusts branches to branches to eliminate the extra hop, /// eliminates unreachable basic blocks, and does other "peephole" optimization /// of the CFG. It returns true if a modification was made. /// bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI, - const DataLayout *DL, AssumptionTracker *AT) { - return SimplifyCFGOpt(TTI, DL, AT).run(BB); + unsigned BonusInstThreshold, AssumptionCache *AC) { + return SimplifyCFGOpt(TTI, BB->getModule()->getDataLayout(), + BonusInstThreshold, AC).run(BB); }
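// Illustrative sketch (not from the patch): the diamond/twin-if shape that the
// MergeCondStores check in SimplifyCondBranch above looks for. Two conditional
// stores to the same address become one block that stores a selected value under
// the OR of the conditions, which can later be if-converted into a single
// predicated store. Names are invented.
void beforeMerge(int *p, bool a, bool b, int x, int y) {
  if (a)
    *p = x;
  if (b)
    *p = y;
}

void afterMerge(int *p, bool a, bool b, int x, int y) {
  if (a | b)
    *p = b ? y : x;   // the value the last reaching store would have written
}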