diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b287ca7c8d5..e2a1e5c32d2 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -23,19 +23,20 @@
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -49,32 +50,22 @@ static cl::opt<int>
 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                  cl::desc("Only vectorize if you gain more than this "
                           "number "));
+
+static cl::opt<bool>
+ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
+                   cl::desc("Attempt to vectorize horizontal reductions"));
+
+static cl::opt<bool> ShouldStartVectorizeHorAtStore(
+    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
+    cl::desc(
+        "Attempt to vectorize horizontal reductions feeding into a store"));
+
 namespace {
 
 static const unsigned MinVecRegSize = 128;
 
 static const unsigned RecursionMaxDepth = 12;
 
-/// RAII pattern to save the insertion point of the IR builder.
-class BuilderLocGuard {
-public:
-  BuilderLocGuard(IRBuilder<> &B) : Builder(B), Loc(B.GetInsertPoint()),
-                                    DbgLoc(B.getCurrentDebugLocation()) {}
-  ~BuilderLocGuard() {
-    Builder.SetCurrentDebugLocation(DbgLoc);
-    if (Loc)
-      Builder.SetInsertPoint(Loc);
-  }
-
-private:
-  // Prevent copying.
-  BuilderLocGuard(const BuilderLocGuard &);
-  BuilderLocGuard &operator=(const BuilderLocGuard &);
-  IRBuilder<> &Builder;
-  AssertingVH<Instruction> Loc;
-  DebugLoc DbgLoc;
-};
-
 /// A helper class for numbering instructions in multiple blocks.
 /// Numbers start at zero for each basic block.
 struct BlockNumbering {
@@ -173,6 +164,37 @@ static unsigned getSameOpcode(ArrayRef<Value *> VL) {
   return Opcode;
 }
 
+/// \returns \p I after propagating metadata from \p VL.
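+/// For example, when every scalar load in \p VL carries a !tbaa tag, the
+/// resulting vector load should carry the most generic of those tags, and
+/// metadata kinds we do not know how to merge are simply dropped.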
+static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
+  Instruction *I0 = cast<Instruction>(VL[0]);
+  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
+  I0->getAllMetadataOtherThanDebugLoc(Metadata);
+
+  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
+    unsigned Kind = Metadata[i].first;
+    MDNode *MD = Metadata[i].second;
+
+    for (int i = 1, e = VL.size(); MD && i != e; i++) {
+      Instruction *I = cast<Instruction>(VL[i]);
+      MDNode *IMD = I->getMetadata(Kind);
+
+      switch (Kind) {
+      default:
+        MD = 0; // Remove unknown metadata
+        break;
+      case LLVMContext::MD_tbaa:
+        MD = MDNode::getMostGenericTBAA(MD, IMD);
+        break;
+      case LLVMContext::MD_fpmath:
+        MD = MDNode::getMostGenericFPMath(MD, IMD);
+        break;
+      }
+    }
+    I->setMetadata(Kind, MD);
+  }
+  return I;
+}
+
 /// \returns The type that all of the values in \p VL have or null if there
 /// are different types.
 static Type* getSameType(ArrayRef<Value *> VL) {
@@ -216,6 +238,104 @@ static bool CanReuseExtract(ArrayRef<Value *> VL) {
   return true;
 }
 
+static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
+                                           SmallVectorImpl<Value *> &Left,
+                                           SmallVectorImpl<Value *> &Right) {
+
+  SmallVector<Value *, 16> OrigLeft, OrigRight;
+
+  bool AllSameOpcodeLeft = true;
+  bool AllSameOpcodeRight = true;
+  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
+    Instruction *I = cast<Instruction>(VL[i]);
+    Value *V0 = I->getOperand(0);
+    Value *V1 = I->getOperand(1);
+
+    OrigLeft.push_back(V0);
+    OrigRight.push_back(V1);
+
+    Instruction *I0 = dyn_cast<Instruction>(V0);
+    Instruction *I1 = dyn_cast<Instruction>(V1);
+
+    // Check whether all operands on one side have the same opcode. In this
+    // case we want to preserve the original order and not make things worse
+    // by reordering.
+    AllSameOpcodeLeft = I0;
+    AllSameOpcodeRight = I1;
+
+    if (i && AllSameOpcodeLeft) {
+      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
+        if (P0->getOpcode() != I0->getOpcode())
+          AllSameOpcodeLeft = false;
+      } else
+        AllSameOpcodeLeft = false;
+    }
+    if (i && AllSameOpcodeRight) {
+      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
+        if (P1->getOpcode() != I1->getOpcode())
+          AllSameOpcodeRight = false;
+      } else
+        AllSameOpcodeRight = false;
+    }
+
+    // Sort two opcodes. In the code below we try to preserve the ability to
+    // use broadcast of values instead of individual inserts.
+    // vl1 = load
+    // vl2 = phi
+    // vr1 = load
+    // vr2 = vr1
+    //         = vl1 x vr1
+    //         = vl2 x vr2
+    // If we just sorted according to opcode we would leave the first line
+    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
+    //         = vl1 x vr1
+    //         = vr2 x vl2
+    // Because vr2 and vr1 are from the same load we lose the opportunity of a
+    // broadcast for the packed right side in the backend: we have [vr1, vl2]
+    // instead of [vr1, vr2=vr1].
+    if (I0 && I1) {
+      if (!i && I0->getOpcode() > I1->getOpcode()) {
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
+        // Try not to destroy a broadcast for no apparent benefit.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
+        // Try to preserve broadcasts.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
+        // Try to preserve broadcasts.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else {
+        Left.push_back(I0);
+        Right.push_back(I1);
+      }
+      continue;
+    }
+    // One opcode, put the instruction on the right.
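+    // E.g. for a bundle { mul(load0, c0), mul(load1, c1) } only the first
+    // operands are instructions; pushing them to the right groups the loads
+    // on one side and the constants on the other.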
+    if (I0) {
+      Left.push_back(V1);
+      Right.push_back(I0);
+      continue;
+    }
+    Left.push_back(V0);
+    Right.push_back(V1);
+  }
+
+  bool LeftBroadcast = isSplat(Left);
+  bool RightBroadcast = isSplat(Right);
+
+  // Don't reorder if the operands were good to begin with.
+  if (!(LeftBroadcast || RightBroadcast) &&
+      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
+    Left = OrigLeft;
+    Right = OrigRight;
+  }
+}
+
 /// Bottom Up SLP Vectorizer.
 class BoUpSLP {
 public:
@@ -238,17 +358,20 @@ public:
   }
 
   /// \brief Vectorize the tree that starts with the elements in \p VL.
-  void vectorizeTree();
+  /// Returns the vectorized root.
+  Value *vectorizeTree();
 
   /// \returns the vectorization cost of the subtree that starts at \p VL.
   /// A negative number means that this is profitable.
   int getTreeCost();
 
-  /// Construct a vectorizable tree that starts at \p Roots.
-  void buildTree(ArrayRef<Value *> Roots);
+  /// Construct a vectorizable tree that starts at \p Roots and is possibly
+  /// used by a reduction of \p RdxOps.
+  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);
 
   /// Clear the internal data structures that are created by 'buildTree'.
   void deleteTree() {
+    RdxOps = 0;
     VectorizableTree.clear();
     ScalarToTreeEntry.clear();
     MustGather.clear();
@@ -305,7 +428,7 @@ private:
   /// \returns the pointer to the barrier instruction if we can't sink.
   Value *getSinkBarrier(Instruction *Src, Instruction *Dst);
 
-  /// \returns the index of the last instrucion in the BB from \p VL.
+  /// \returns the index of the last instruction in the BB from \p VL.
   int getLastIndex(ArrayRef<Value *> VL);
 
   /// \returns the Instruction in the bundle \p VL.
@@ -318,6 +441,10 @@ private:
   /// \returns a vector from a collection of scalars in \p VL.
   Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
 
+  /// \returns whether the VectorizableTree is fully vectorizable and will
+  /// be beneficial even when the tree height is tiny.
+  bool isFullyVectorizableTinyTree();
+
   struct TreeEntry {
     TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                   NeedToGather(0) {}
 
@@ -325,10 +452,7 @@ private:
     /// \returns true if the scalars in VL are equal to this entry.
     bool isSame(ArrayRef<Value *> VL) const {
       assert(VL.size() == Scalars.size() && "Invalid size");
-      for (int i = 0, e = VL.size(); i != e; ++i)
-        if (VL[i] != Scalars[i])
-          return false;
-      return true;
+      return std::equal(VL.begin(), VL.end(), Scalars.begin());
     }
 
     /// A vector of scalars.
@@ -397,10 +521,15 @@ private:
   /// Holds all of the instructions that we gathered.
   SetVector<Instruction *> GatherSeq;
+  /// A list of blocks that we are going to CSE.
+  SetVector<BasicBlock *> CSEBlocks;
 
   /// Numbers instructions in different blocks.
   DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;
 
+  /// Reduction operators.
+  ValueSet *RdxOps;
+
   // Analysis and block reference.
   Function *F;
   ScalarEvolution *SE;
@@ -413,8 +542,9 @@ private:
   IRBuilder<> Builder;
 };
 
-void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
+void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
   deleteTree();
+  RdxOps = Rdx;
   if (!getSameType(Roots))
     return;
   buildTree_rec(Roots, 0);
@@ -435,18 +565,20 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
          UE = Scalar->use_end(); User != UE; ++User) {
       DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");
 
-      bool Gathered = MustGather.count(*User);
-
       // Skip in-tree scalars that become vectors.
-      if (ScalarToTreeEntry.count(*User) && !Gathered) {
+      if (ScalarToTreeEntry.count(*User)) {
         DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
               **User << ".\n");
         int Idx = ScalarToTreeEntry[*User]; (void) Idx;
         assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
         continue;
       }
+      Instruction *UserInst = dyn_cast<Instruction>(*User);
+      if (!UserInst)
+        continue;
 
-      if (!isa<Instruction>(*User))
+      // Ignore uses that are part of the reduction.
+      if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
         continue;
 
       DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
@@ -578,6 +710,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
         continue;
       }
 
+      // This user is part of the reduction.
+      if (RdxOps && RdxOps->count(User))
+        continue;
+
       // Make sure that we can schedule this unknown user.
       BlockNumbering &BN = BlocksNumbers[BB];
       int UserIndex = BN.getIndex(User);
@@ -674,13 +810,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
     }
     case Instruction::Load: {
       // Check if the loads are consecutive or if we need to swizzle them.
-      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
-        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
+      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
+        LoadInst *L = cast<LoadInst>(VL[i]);
+        if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
           newTreeEntry(VL, false);
           DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
           return;
         }
-
+      }
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a vector of loads.\n");
       return;
@@ -769,6 +906,16 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
 
+      // Sort operands of the instructions so that each side is more likely to
+      // have the same opcode.
+      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
+        ValueList Left, Right;
+        reorderInputsAccordingToOpcode(VL, Left, Right);
+        buildTree_rec(Left, Depth + 1);
+        buildTree_rec(Right, Depth + 1);
+        return;
+      }
+
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
         ValueList Operands;
         // Prepare the operand vector.
@@ -784,7 +931,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
         if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
           newTreeEntry(VL, false);
-          DEBUG(dbgs() << "SLP: Non consecutive store.\n");
+          DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
           return;
         }
 
@@ -890,9 +1037,38 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
           TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
       VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
     } else {
-      ScalarCost = VecTy->getNumElements() *
-      TTI->getArithmeticInstrCost(Opcode, ScalarTy);
-      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
+      // Certain instructions can be cheaper to vectorize if they have a
+      // constant second vector operand.
+      TargetTransformInfo::OperandValueKind Op1VK =
+          TargetTransformInfo::OK_AnyValue;
+      TargetTransformInfo::OperandValueKind Op2VK =
+          TargetTransformInfo::OK_UniformConstantValue;
+
+      // If all operands are exactly the same ConstantInt then set the
+      // operand kind to OK_UniformConstantValue.
+      // If instead not all operands are constants, then set the operand kind
+      // to OK_AnyValue. If all operands are constants but not the same,
+      // then set the operand kind to OK_NonUniformConstantValue.
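+      // For example, { shl %a, 5; shl %b, 5 } has a uniform constant second
+      // operand, { shl %a, 5; shl %b, 7 } a non-uniform constant one, and
+      // { shl %a, %n; shl %b, 5 } falls back to OK_AnyValue.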
+      ConstantInt *CInt = NULL;
+      for (unsigned i = 0; i < VL.size(); ++i) {
+        const Instruction *I = cast<Instruction>(VL[i]);
+        if (!isa<ConstantInt>(I->getOperand(1))) {
+          Op2VK = TargetTransformInfo::OK_AnyValue;
+          break;
+        }
+        if (i == 0) {
+          CInt = cast<ConstantInt>(I->getOperand(1));
+          continue;
+        }
+        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
+            CInt != cast<ConstantInt>(I->getOperand(1)))
+          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
+      }
+
+      ScalarCost =
+          VecTy->getNumElements() *
+          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
+      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
     }
     return VecCost - ScalarCost;
   }
@@ -900,14 +1076,14 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
     // Cost of wide load - cost of scalar loads.
     int ScalarLdCost = VecTy->getNumElements() *
     TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
-    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
+    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
   }
   case Instruction::Store: {
     // We know that we can merge the stores. Calculate the cost.
     int ScalarStCost = VecTy->getNumElements() *
     TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
-    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
+    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
     return VecStCost - ScalarStCost;
   }
   default:
@@ -915,19 +1091,32 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
   }
 }
 
+bool BoUpSLP::isFullyVectorizableTinyTree() {
+  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
+        VectorizableTree.size() << " is fully vectorizable.\n");
+
+  // We only handle trees of height 2.
+  if (VectorizableTree.size() != 2)
+    return false;
+
+  // Gathering cost would be too much for tiny trees.
+  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
+    return false;
+
+  return true;
+}
+
 int BoUpSLP::getTreeCost() {
   int Cost = 0;
   DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
         VectorizableTree.size() << ".\n");
 
-  // Don't vectorize tiny trees. Small load/store chains or consecutive stores
-  // of constants will be vectoried in SelectionDAG in MergeConsecutiveStores.
-  // The SelectionDAG vectorizer can only handle pairs (trees of height = 2).
-  if (VectorizableTree.size() < 3) {
+  // We only vectorize tiny trees if they are fully vectorizable.
+  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
     if (!VectorizableTree.size()) {
       assert(!ExternalUses.size() && "We should not have any external users");
     }
-    return 0;
+    return INT_MAX;
   }
 
   unsigned BundleWidth = VectorizableTree[0].Scalars.size();
@@ -939,16 +1128,19 @@ int BoUpSLP::getTreeCost() {
     Cost += C;
   }
 
+  SmallSet<Value *, 16> ExtractCostCalculated;
   int ExtractCost = 0;
   for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
        I != E; ++I) {
+    // We only add extract cost once for the same scalar.
+    if (!ExtractCostCalculated.insert(I->Scalar))
+      continue;
 
     VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
     ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                            I->Lane);
   }
 
-
   DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
   return Cost + ExtractCost;
 }
@@ -1100,6 +1292,7 @@ Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
     Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
     if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
+      CSEBlocks.insert(Insrt->getParent());
 
       // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
@@ -1152,7 +1345,7 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
 }
 
 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
-  BuilderLocGuard Guard(Builder);
+  IRBuilder<>::InsertPointGuard Guard(Builder);
 
   if (E->VectorizedValue) {
     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
@@ -1176,7 +1369,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
   switch (Opcode) {
     case Instruction::PHI: {
       PHINode *PH = dyn_cast<PHINode>(VL0);
-      Builder.SetInsertPoint(PH->getParent()->getFirstInsertionPt());
+      Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
       E->VectorizedValue = NewPhi;
@@ -1312,10 +1505,13 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
     case Instruction::Or:
     case Instruction::Xor: {
      ValueList LHSVL, RHSVL;
-      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
-        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
-        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
-      }
+      if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
+        reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
+      else
+        for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
+          LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
+          RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
+        }
 
       setInsertPointAfterBundle(E->Scalars);
 
@@ -1332,6 +1528,10 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
       E->VectorizedValue = V;
+
+      if (Instruction *I = dyn_cast<Instruction>(V))
+        return propagateMetadata(I, E->Scalars);
+
       return V;
     }
     case Instruction::Load: {
@@ -1340,17 +1540,20 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars);
 
       LoadInst *LI = cast<LoadInst>(VL0);
-      Value *VecPtr =
-      Builder.CreateBitCast(LI->getPointerOperand(), VecTy->getPointerTo());
+      unsigned AS = LI->getPointerAddressSpace();
+
+      Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
+                                            VecTy->getPointerTo(AS));
      unsigned Alignment = LI->getAlignment();
       LI = Builder.CreateLoad(VecPtr);
       LI->setAlignment(Alignment);
       E->VectorizedValue = LI;
-      return LI;
+      return propagateMetadata(LI, E->Scalars);
     }
     case Instruction::Store: {
       StoreInst *SI = cast<StoreInst>(VL0);
       unsigned Alignment = SI->getAlignment();
+      unsigned AS = SI->getPointerAddressSpace();
 
       ValueList ValueOp;
       for (int i = 0, e = E->Scalars.size(); i < e; ++i)
@@ -1359,12 +1562,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars);
 
       Value *VecValue = vectorizeTree(ValueOp);
-      Value *VecPtr =
-      Builder.CreateBitCast(SI->getPointerOperand(), VecTy->getPointerTo());
+      Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
+                                            VecTy->getPointerTo(AS));
       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
       S->setAlignment(Alignment);
       E->VectorizedValue = S;
-      return S;
+      return propagateMetadata(S, E->Scalars);
     }
     default:
       llvm_unreachable("unknown inst");
@@ -1372,7 +1575,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
   return 0;
 }
 
-void BoUpSLP::vectorizeTree() {
+Value *BoUpSLP::vectorizeTree() {
   Builder.SetInsertPoint(F->getEntryBlock().begin());
   vectorizeTree(&VectorizableTree[0]);
 
@@ -1404,6 +1607,7 @@ void BoUpSLP::vectorizeTree() {
       if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
        Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
+        CSEBlocks.insert(PN->getParent());
         User->replaceUsesOfWith(Scalar, Ex);
      } else if (isa<Instruction>(Vec)){
        if (PHINode *PH = dyn_cast<PHINode>(User)) {
@@ -1411,17 +1615,20 @@
          for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
            if (PH->getIncomingValue(i) == Scalar) {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
              Value *Ex = Builder.CreateExtractElement(Vec, Lane);
+              CSEBlocks.insert(PH->getIncomingBlock(i));
              PH->setOperand(i, Ex);
            }
          }
        } else {
          Builder.SetInsertPoint(cast<Instruction>(User));
          Value *Ex = Builder.CreateExtractElement(Vec, Lane);
+          CSEBlocks.insert(cast<Instruction>(User)->getParent());
          User->replaceUsesOfWith(Scalar, Ex);
        }
      } else {
        Builder.SetInsertPoint(F->getEntryBlock().begin());
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
+        CSEBlocks.insert(&F->getEntryBlock());
        User->replaceUsesOfWith(Scalar, Ex);
      }
 
@@ -1447,9 +1654,10 @@ void BoUpSLP::vectorizeTree() {
       for (Value::use_iterator User = Scalar->use_begin(),
            UE = Scalar->use_end(); User != UE; ++User) {
         DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
-        assert(!MustGather.count(*User) &&
-               "Replacing gathered value with undef");
-        assert(ScalarToTreeEntry.count(*User) &&
+
+        assert((ScalarToTreeEntry.count(*User) ||
+                // It is legal to replace the reduction users with undef.
+                (RdxOps && RdxOps->count(*User))) &&
               "Replacing out-of-tree value with undef");
       }
       Value *Undef = UndefValue::get(Ty);
@@ -1464,8 +1672,20 @@ void BoUpSLP::vectorizeTree() {
     BlocksNumbers[it].forget();
   }
   Builder.ClearInsertionPoint();
+
+  return VectorizableTree[0].VectorizedValue;
 }
 
+class DTCmp {
+  const DominatorTree *DT;
+
+public:
+  DTCmp(const DominatorTree *DT) : DT(DT) {}
+  bool operator()(const BasicBlock *A, const BasicBlock *B) const {
+    return DT->properlyDominates(A, B);
+  }
+};
+
 void BoUpSLP::optimizeGatherSequence() {
   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
         << " gather sequences instructions.\n");
@@ -1501,45 +1721,48 @@ void BoUpSLP::optimizeGatherSequence() {
     Insert->moveBefore(PreHeader->getTerminator());
   }
 
+  // Sort blocks by domination. This ensures we visit a block after all blocks
+  // dominating it are visited.
+  SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
+  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));
+
   // Perform O(N^2) search over the gather sequences and merge identical
   // instructions. TODO: We can further optimize this scan if we split the
   // instructions into different buckets based on the insert lane.
-  SmallPtrSet<Instruction *, 16> Visited;
-  SmallVector<Instruction *, 16> ToRemove;
-  ReversePostOrderTraversal<Function *> RPOT(F);
-  for (ReversePostOrderTraversal<Function *>::rpo_iterator I = RPOT.begin(),
-       E = RPOT.end(); I != E; ++I) {
+  SmallVector<Instruction *, 16> Visited;
+  for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
                                               E = CSEWorkList.end();
+       I != E; ++I) {
+    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *llvm::prior(I))) &&
+           "Worklist not sorted properly!");
     BasicBlock *BB = *I;
-    // For all instructions in the function:
-    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
-      Instruction *In = it;
-      if ((!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) ||
-          !GatherSeq.count(In))
+    // For all instructions in blocks containing gather sequences:
+    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
+      Instruction *In = it++;
+      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;
 
       // Check if we can replace this instruction with any of the
       // visited instructions.
-      for (SmallPtrSet<Instruction *, 16>::iterator v = Visited.begin(),
-           ve = Visited.end(); v != ve; ++v) {
+      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
+                                                    ve = Visited.end();
+           v != ve; ++v) {
         if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
-          ToRemove.push_back(In);
+          In->eraseFromParent();
          In = 0;
           break;
        }
       }
-      if (In)
-        Visited.insert(In);
+      if (In) {
+        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
+        Visited.push_back(In);
+      }
     }
   }
-
-  // Erase all of the instructions that we RAUWed.
-  for (SmallVectorImpl<Instruction *>::iterator v = ToRemove.begin(),
-       ve = ToRemove.end(); v != ve; ++v) {
-    assert((*v)->getNumUses() == 0 && "Can't remove instructions with uses");
-    (*v)->eraseFromParent();
-  }
+  CSEBlocks.clear();
+  GatherSeq.clear();
 }
 
 /// The SLPVectorizer Pass.
@@ -1562,16 +1785,24 @@ struct SLPVectorizer : public FunctionPass {
   DominatorTree *DT;
 
   virtual bool runOnFunction(Function &F) {
+    if (skipOptnoneFunction(F))
+      return false;
+
     SE = &getAnalysis<ScalarEvolution>();
     DL = getAnalysisIfAvailable<DataLayout>();
     TTI = &getAnalysis<TargetTransformInfo>();
     AA = &getAnalysis<AliasAnalysis>();
     LI = &getAnalysis<LoopInfo>();
-    DT = &getAnalysis<DominatorTree>();
+    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
 
     StoreRefs.clear();
     bool Changed = false;
 
+    // If the target claims to have no vector registers don't attempt
+    // vectorization.
+    if (!TTI->getNumberOfRegisters(true))
+      return false;
+
     // Must have DataLayout. We can't require it because some tests run w/o
     // triple.
     if (!DL)
@@ -1583,7 +1814,7 @@ struct SLPVectorizer : public FunctionPass {
 
     DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
 
-    // Use the bollom up slp vectorizer to construct chains that start with
+    // Use the bottom up slp vectorizer to construct chains that start with
    // the store instructions.
     BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);
 
@@ -1617,9 +1848,9 @@ struct SLPVectorizer : public FunctionPass {
     AU.addRequired<AliasAnalysis>();
     AU.addRequired<TargetTransformInfo>();
     AU.addRequired<LoopInfo>();
-    AU.addRequired<DominatorTree>();
+    AU.addRequired<DominatorTreeWrapperPass>();
     AU.addPreserved<LoopInfo>();
-    AU.addPreserved<DominatorTree>();
+    AU.addPreserved<DominatorTreeWrapperPass>();
     AU.setPreservesCFG();
   }
 
@@ -1657,6 +1888,21 @@ private:
   StoreListMap StoreRefs;
 };
 
+/// \brief Check that the Values in the slice of the VL array still exist in
+/// the WeakVH array.
+/// Vectorization of part of the VL array may cause later values in the VL
+/// array to become invalid. We track when this has happened in the WeakVH
+/// array.
+static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
+                               SmallVectorImpl<WeakVH> &VH,
+                               unsigned SliceBegin,
+                               unsigned SliceSize) {
+  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
+    if (VH[i] != VL[i])
+      return true;
+
+  return false;
+}
+
 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                         int CostThreshold, BoUpSLP &R) {
   unsigned ChainLen = Chain.size();
@@ -1669,11 +1915,19 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
   if (!isPowerOf2_32(Sz) || VF < 2)
     return false;
 
+  // Keep track of values that were deleted by vectorizing in the loop below.
+  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
+
   bool Changed = false;
   // Look for profitable vectorizable trees at all offsets, starting at zero.
   for (unsigned i = 0, e = ChainLen; i < e; ++i) {
     if (i + VF > e)
       break;
+
+    // Check that a previous iteration of this loop did not delete the Value.
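+    // A WeakVH nullifies itself when its value is RAUWed and erased, so a
+    // mismatch against the original Chain entry means an earlier, overlapping
+    // slice already vectorized (and deleted) one of these stores.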
+    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
+      continue;
+
     DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
           << "\n");
     ArrayRef<Value *> Operands = Chain.slice(i, VF);
@@ -1693,7 +1947,7 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
     }
   }
 
-  return Changed;
+  return Changed;
 }
 
 bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
@@ -1760,15 +2014,17 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
     if (!SI)
       continue;
 
+    // Don't touch volatile stores.
+    if (!SI->isSimple())
+      continue;
+
     // Check that the pointer points to scalars.
     Type *Ty = SI->getValueOperand()->getType();
     if (Ty->isAggregateType() || Ty->isVectorTy())
       return 0;
 
-    // Find the base of the GEP.
-    Value *Ptr = SI->getPointerOperand();
-    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
-      Ptr = GEP->getPointerOperand();
+    // Find the base pointer.
+    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
 
     // Save the store locations.
     StoreRefs[Ptr].push_back(SI);
@@ -1796,7 +2052,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
     return false;
 
   unsigned Opcode0 = I0->getOpcode();
-
+
   Type *Ty0 = I0->getType();
   unsigned Sz = DL->getTypeSizeInBits(Ty0);
   unsigned VF = MinVecRegSize / Sz;
@@ -1811,11 +2067,14 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
   }
 
   bool Changed = false;
-
+
+  // Keep track of values that were deleted by vectorizing in the loop below.
+  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
+
   for (unsigned i = 0, e = VL.size(); i < e; ++i) {
     unsigned OpsWidth = 0;
-
-    if (i + VF > e)
+
+    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;
@@ -1823,23 +2082,28 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
     if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
       break;
 
-    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " << "\n");
+    // Check that a previous iteration of this loop did not delete the Value.
+    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
+      continue;
+
+    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
+                 << "\n");
     ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
-
+
     R.buildTree(Ops);
     int Cost = R.getTreeCost();
-
+
     if (Cost < -SLPCostThreshold) {
       DEBUG(dbgs() << "SLP: Vectorizing pair at cost:" << Cost << ".\n");
       R.vectorizeTree();
-
+
       // Move to the next bundle.
       i += VF - 1;
       Changed = true;
     }
   }
-
-  return Changed;
+
+  return Changed;
 }
 
 bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
@@ -1882,6 +2146,307 @@ bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
   return 0;
 }
 
+/// \brief Generate a shuffle mask to be used in a reduction tree.
+///
+/// \param VecLen The length of the vector to be reduced.
+/// \param NumEltsToRdx The number of elements that should be reduced in the
+///        vector.
+/// \param IsPairwise Whether the reduction is a pairwise or splitting
+///        reduction. A pairwise reduction will generate a mask of
+///        <0,2,...> or <1,3,..> while a splitting reduction will generate
+///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
+/// \param IsLeft True will generate a mask of even elements, odd otherwise.
+static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
+                                   bool IsPairwise, bool IsLeft,
+                                   IRBuilder<> &Builder) {
+  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
+
+  SmallVector<Constant *, 32> ShuffleMask(
+      VecLen, UndefValue::get(Builder.getInt32Ty()));
+
+  if (IsPairwise)
+    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
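+    // E.g. for VecLen = 4 and NumEltsToRdx = 2 this yields
+    // <0, 2, undef, undef> for the left mask and <1, 3, undef, undef> for the
+    // right one; the splitting variant below yields <2, 3, undef, undef>.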
+    for (unsigned i = 0; i != NumEltsToRdx; ++i)
+      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
+  else
+    // Move the upper half of the vector to the lower half.
+    for (unsigned i = 0; i != NumEltsToRdx; ++i)
+      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
+
+  return ConstantVector::get(ShuffleMask);
+}
+
+
+/// Model horizontal reductions.
+///
+/// A horizontal reduction is a tree of reduction operations (currently add and
+/// fadd) that has operations that can be put into a vector as its leaf.
+/// For example, this tree:
+///
+/// mul mul mul mul
+///  \  /    \  /
+///   +       +
+///    \     /
+///       +
+/// This tree has "mul" as its reduced values and "+" as its reduction
+/// operations. A reduction might be feeding into a store or a binary operation
+/// feeding a phi.
+///    ...
+///    \  /
+///     +
+///     |
+///  phi +=
+///
+///  Or:
+///    ...
+///    \  /
+///     +
+///     |
+///   *p =
+///
+class HorizontalReduction {
+  SmallPtrSet<Value *, 16> ReductionOps;
+  SmallVector<Value *, 32> ReducedVals;
+
+  BinaryOperator *ReductionRoot;
+  PHINode *ReductionPHI;
+
+  /// The opcode of the reduction.
+  unsigned ReductionOpcode;
+  /// The opcode of the values we perform a reduction on.
+  unsigned ReducedValueOpcode;
+  /// The width of one full horizontal reduction operation.
+  unsigned ReduxWidth;
+  /// Should we model this reduction as a pairwise reduction tree or a tree
+  /// that splits the vector in halves and adds those halves.
+  bool IsPairwiseReduction;
+
+public:
+  HorizontalReduction()
+      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
+        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}
+
+  /// \brief Try to find a reduction tree.
+  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
+                                 DataLayout *DL) {
+    assert((!Phi ||
+            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
+           "This phi needs to use the binary operator");
+
+    // We could have an initial reduction that is not an add.
+    //  r *= v1 + v2 + v3 + v4
+    // In such a case start looking for a tree rooted in the first '+'.
+    if (Phi) {
+      if (B->getOperand(0) == Phi) {
+        Phi = 0;
+        B = dyn_cast<BinaryOperator>(B->getOperand(1));
+      } else if (B->getOperand(1) == Phi) {
+        Phi = 0;
+        B = dyn_cast<BinaryOperator>(B->getOperand(0));
+      }
+    }
+
+    if (!B)
+      return false;
+
+    Type *Ty = B->getType();
+    if (Ty->isVectorTy())
+      return false;
+
+    ReductionOpcode = B->getOpcode();
+    ReducedValueOpcode = 0;
+    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
+    ReductionRoot = B;
+    ReductionPHI = Phi;
+
+    if (ReduxWidth < 4)
+      return false;
+
+    // We currently only support adds.
+    if (ReductionOpcode != Instruction::Add &&
+        ReductionOpcode != Instruction::FAdd)
+      return false;
+
+    // Post order traverse the reduction tree starting at B. We only handle
+    // true trees containing only binary operators.
+    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
+    Stack.push_back(std::make_pair(B, 0));
+    while (!Stack.empty()) {
+      BinaryOperator *TreeN = Stack.back().first;
+      unsigned EdgeToVisit = Stack.back().second++;
+      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
+
+      // Only handle trees in the current basic block.
+      if (TreeN->getParent() != B->getParent())
+        return false;
+
+      // Each tree node needs to have one user except for the ultimate
+      // reduction.
+      if (!TreeN->hasOneUse() && TreeN != B)
+        return false;
+
+      // Postorder visit.
+      if (EdgeToVisit == 2 || IsReducedValue) {
+        if (IsReducedValue) {
+          // Make sure that the opcodes of the operations that we are going to
+          // reduce match.
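+          // E.g. in (a*b + c*d) + e*f the muls are the reduced values; the
+          // first one seen fixes ReducedValueOpcode and the rest must agree.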
+          if (!ReducedValueOpcode)
+            ReducedValueOpcode = TreeN->getOpcode();
+          else if (ReducedValueOpcode != TreeN->getOpcode())
+            return false;
+          ReducedVals.push_back(TreeN);
+        } else {
+          // We need to be able to reassociate the adds.
+          if (!TreeN->isAssociative())
+            return false;
+          ReductionOps.insert(TreeN);
+        }
+        // Retract.
+        Stack.pop_back();
+        continue;
+      }
+
+      // Visit left or right.
+      Value *NextV = TreeN->getOperand(EdgeToVisit);
+      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
+      if (Next)
+        Stack.push_back(std::make_pair(Next, 0));
+      else if (NextV != Phi)
+        return false;
+    }
+    return true;
+  }
+
+  /// \brief Attempt to vectorize the tree found by
+  /// matchAssociativeReduction.
+  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
+    if (ReducedVals.empty())
+      return false;
+
+    unsigned NumReducedVals = ReducedVals.size();
+    if (NumReducedVals < ReduxWidth)
+      return false;
+
+    Value *VectorizedTree = 0;
+    IRBuilder<> Builder(ReductionRoot);
+    FastMathFlags Unsafe;
+    Unsafe.setUnsafeAlgebra();
+    Builder.SetFastMathFlags(Unsafe);
+    unsigned i = 0;
+
+    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
+      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
+      V.buildTree(ValsToReduce, &ReductionOps);
+
+      // Estimate cost.
+      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
+      if (Cost >= -SLPCostThreshold)
+        break;
+
+      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
+                   << ". (HorRdx)\n");
+
+      // Vectorize a tree.
+      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
+      Value *VectorizedRoot = V.vectorizeTree();
+
+      // Emit a reduction.
+      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
+      if (VectorizedTree) {
+        Builder.SetCurrentDebugLocation(Loc);
+        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
+                                     ReducedSubTree, "bin.rdx");
+      } else
+        VectorizedTree = ReducedSubTree;
+    }
+
+    if (VectorizedTree) {
+      // Finish the reduction.
+      for (; i < NumReducedVals; ++i) {
+        Builder.SetCurrentDebugLocation(
+            cast<Instruction>(ReducedVals[i])->getDebugLoc());
+        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
+                                     ReducedVals[i]);
+      }
+      // Update users.
+      if (ReductionPHI) {
+        assert(ReductionRoot != NULL && "Need a reduction operation");
+        ReductionRoot->setOperand(0, VectorizedTree);
+        ReductionRoot->setOperand(1, ReductionPHI);
+      } else
+        ReductionRoot->replaceAllUsesWith(VectorizedTree);
+    }
+    return VectorizedTree != 0;
+  }
+
+private:
+
+  /// \brief Calculate the cost of a reduction.
+  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
+    Type *ScalarTy = FirstReducedVal->getType();
+    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
+
+    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
+    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
+
+    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
+    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
+
+    int ScalarReduxCost =
+        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);
+
+    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
+                 << " for reduction that starts with " << *FirstReducedVal
+                 << " (It is a "
+                 << (IsPairwiseReduction ? "pairwise" : "splitting")
+                 << " reduction)\n");
+
+    return VecReduxCost - ScalarReduxCost;
+  }
+
+  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
+                            Value *R, const Twine &Name = "") {
+    if (Opcode == Instruction::FAdd)
+      return Builder.CreateFAdd(L, R, Name);
+    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
+  }
+
+  /// \brief Emit a horizontal reduction of the vectorized value.
+  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
+    assert(VectorizedValue && "Need to have a vectorized tree node");
+    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
+    assert(isPowerOf2_32(ReduxWidth) &&
+           "We only handle power-of-two reductions for now");
+
+    Value *TmpVec = ValToReduce;
+    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
+      if (IsPairwiseReduction) {
+        Value *LeftMask =
+            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
+        Value *RightMask =
+            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
+
+        Value *LeftShuf = Builder.CreateShuffleVector(
+            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
+        Value *RightShuf = Builder.CreateShuffleVector(
+            TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
+            "rdx.shuf.r");
+        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
+                             "bin.rdx");
+      } else {
+        Value *UpperHalf =
+            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
+        Value *Shuf = Builder.CreateShuffleVector(
+            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
+        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
+      }
+    }
+
+    // The result is in the first element of the vector.
+    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
+  }
+};
+
 /// \brief Recognize construction of vectors like
 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
@@ -1916,42 +2481,63 @@ static bool findBuildVector(InsertElementInst *IE,
   return false;
 }
 
+static bool PhiTypeSorterFunc(Value *V, Value *V2) {
+  return V->getType() < V2->getType();
+}
+
 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
   bool Changed = false;
   SmallVector<Value *, 4> Incoming;
-  SmallSet<Instruction *, 16> VisitedInstrs;
+  SmallSet<Value *, 16> VisitedInstrs;
+
+  bool HaveVectorizedPhiNodes = true;
+  while (HaveVectorizedPhiNodes) {
+    HaveVectorizedPhiNodes = false;
+
+    // Collect the incoming values from the PHIs.
+    Incoming.clear();
+    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
+         ++instr) {
+      PHINode *P = dyn_cast<PHINode>(instr);
+      if (!P)
+        break;
 
-  // Collect the incoming values from the PHIs.
-  for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
-       ++instr) {
-    PHINode *P = dyn_cast<PHINode>(instr);
+      if (!VisitedInstrs.count(P))
+        Incoming.push_back(P);
+    }
 
-    if (!P)
-      break;
+    // Sort by type.
+    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
 
-    // We may go through BB multiple times so skip the one we have checked.
-    if (!VisitedInstrs.insert(instr))
-      continue;
+    // Try to vectorize elements based on their type.
+    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
+                                           E = Incoming.end();
+         IncIt != E;) {
 
-    // Stop constructing the list when you reach a different type.
-    if (Incoming.size() && P->getType() != Incoming[0]->getType()) {
-      if (tryToVectorizeList(Incoming, R)) {
-        // We would like to start over since some instructions are deleted
-        // and the iterator may become invalid value.
+      // Look for the next elements with the same type.
+      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
+      while (SameTypeIt != E &&
+             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
+        VisitedInstrs.insert(*SameTypeIt);
+        ++SameTypeIt;
+      }
+
+      // Try to vectorize them.
+      unsigned NumElts = (SameTypeIt - IncIt);
+      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
+                   << NumElts << ")\n");
+      if (NumElts > 1 &&
+          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
+        // Success: start over because instructions might have been changed.
+        HaveVectorizedPhiNodes = true;
         Changed = true;
-        instr = BB->begin();
-        ie = BB->end();
+        break;
       }
-      Incoming.clear();
+
+      // Start over at the next instruction of a different type (or the end).
+      IncIt = SameTypeIt;
     }
-
-    Incoming.push_back(P);
   }
 
-  if (Incoming.size() > 1)
-    Changed |= tryToVectorizeList(Incoming, R);
-
   VisitedInstrs.clear();
 
   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
@@ -1976,7 +2562,18 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
       if (!BI)
        continue;
 
-      Value *Inst = BI->getOperand(0);
+      // Try to match and vectorize a horizontal reduction.
+      HorizontalReduction HorRdx;
+      if (ShouldVectorizeHor &&
+          HorRdx.matchAssociativeReduction(P, BI, DL) &&
+          HorRdx.tryToReduce(R, TTI)) {
+        Changed = true;
+        it = BB->begin();
+        e = BB->end();
+        continue;
+      }
+
+      Value *Inst = BI->getOperand(0);
       if (Inst == P)
        Inst = BI->getOperand(1);
 
@@ -1986,10 +2583,28 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
         Changed = true;
         it = BB->begin();
         e = BB->end();
+        continue;
       }
+
       continue;
     }
 
+    // Try to vectorize horizontal reductions feeding into a store.
+    if (ShouldStartVectorizeHorAtStore)
+      if (StoreInst *SI = dyn_cast<StoreInst>(it))
+        if (BinaryOperator *BinOp =
+                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
+          HorizontalReduction HorRdx;
+          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
+                HorRdx.tryToReduce(R, TTI)) ||
+               tryToVectorize(BinOp, R))) {
+            Changed = true;
+            it = BB->begin();
+            e = BB->end();
+            continue;
+          }
+        }
+
     // Try to vectorize trees that start at compare instructions.
     if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
       if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {