//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
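// For example (an illustrative sketch, not taken from this file): the four
// consecutive scalar stores
//
//   a[0] = x0; a[1] = x1; a[2] = x2; a[3] = x3;
//
// can be rewritten as a single wide store of a <4 x i32> vector built from
// x0..x3, with the use-def chains of x0..x3 vectorized the same way when the
// cost model finds the tree profitable.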
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
static cl::opt<int>
SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                 cl::desc("Only vectorize if you gain more than this "
                          "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;
/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};
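// A minimal usage sketch (illustrative; the names A and B are hypothetical):
// numbering turns "which instruction comes later in the block?" into an
// integer comparison after a single linear scan.
//
//   BlockNumbering BN(BB);     // BB contains instruction A followed by B
//   BN.getIndex(A);            // == 0
//   BN.getIndex(B);            // == 1
//   BN.getInstruction(1);      // == B, the inverse mapping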
/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;
    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, otherwise returns 0.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;
  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // same position.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
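// For example (illustrative IR): the bundle
//   %e0 = extractelement <4 x i32> %v, i32 0
//   %e1 = extractelement <4 x i32> %v, i32 1
//   %e2 = extractelement <4 x i32> %v, i32 2
//   %e3 = extractelement <4 x i32> %v, i32 3
// reads every lane of %v in order, so CanReuseExtract returns true and the
// vectorizer can reuse %v directly instead of re-gathering the scalars.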
/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots and is possibly
  /// used by a reduction of \p RdxOps.
  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. Such values may occur in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      for (int i = 0, e = VL.size(); i != e; ++i)
        if (VL[i] != Scalars[i])
          return false;
      return true;
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence ?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser (Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L){};
    // Which scalar in our function.
    Value *Scalar;
    // The user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// Reduction operators.
  ValueSet *RdxOps;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};
void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
  deleteTree();
  RdxOps = Rdx;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (Value::use_iterator User = Scalar->use_begin(),
           UE = Scalar->use_end(); User != UE; ++User) {
        DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");

        bool Gathered = MustGather.count(*User);

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(*User) && !Gathered) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                **User << ".\n");
          int Idx = ScalarToTreeEntry[*User]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }

        Instruction *UserInst = dyn_cast<Instruction>(*User);
        if (!UserInst)
          continue;

        // Ignore uses that are part of the reduction.
        if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
      }
    }
  }
}
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
         U != UE; ++U) {
      DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
      Instruction *User = dyn_cast<Instruction>(*U);
      if (!User) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = User->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *User << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*User)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(User)) {
        int Idx = ScalarToTreeEntry[User];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // This user is part of the reduction.
      if (RdxOps && RdxOps->count(User))
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(User);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *User << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions.
  // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
         U != UE; ++U) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && *U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
              << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValue(i));
          if (Term) {
            DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            newTreeEntry(VL, false);
            return;
          }
        }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::ExtractElement: {
      bool Reuse = CanReuseExtract(VL);
      if (Reuse)
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      newTreeEntry(VL, Reuse);
      return;
    }
    case Instruction::Load: {
      // Check if the loads are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
          return;
        }
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (unsigned i = 0; i < VL.size(); ++i) {
        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
        if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
      for (unsigned i = 1, e = VL.size(); i < e; ++i) {
        CmpInst *Cmp = cast<CmpInst>(VL[i]);
        if (Cmp->getPredicate() != P0 ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
          return;
        }
      }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of compares.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::Select:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Non consecutive store.\n");
          return;
        }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of stores.\n");

      ValueList Operands;
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

      // We can ignore these values because we are sinking them down.
      MemBarrierIgnoreList.insert(VL.begin(), VL.end());
      buildTree_rec(Operands, Depth + 1);
      return;
    }
    default:
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
      return;
  }
}
int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL))
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
    case Instruction::PHI: {
      return 0;
    }
    case Instruction::ExtractElement: {
      if (CanReuseExtract(VL))
        return 0;
      return getGatherCost(VecTy);
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();

      // Calculate the cost of this instruction.
      int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                         VL0->getType(), SrcTy);

      VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
      int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
      return VecCost - ScalarCost;
    }
    case Instruction::FCmp:
    case Instruction::ICmp:
    case Instruction::Select:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Calculate the cost of this instruction.
      int ScalarCost = 0;
      int VecCost = 0;
      if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
          Opcode == Instruction::Select) {
        VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
        ScalarCost = VecTy->getNumElements() *
            TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
        VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
      } else {
        ScalarCost = VecTy->getNumElements() *
            TTI->getArithmeticInstrCost(Opcode, ScalarTy);
        VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
      }
      return VecCost - ScalarCost;
    }
    case Instruction::Load: {
      // Cost of wide load - cost of scalar loads.
      int ScalarLdCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
      int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
      return VecLdCost - ScalarLdCost;
    }
    case Instruction::Store: {
      // We know that we can merge the stores. Calculate the cost.
      int ScalarStCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
      int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
      return VecStCost - ScalarStCost;
    }
    default:
      llvm_unreachable("Unknown instruction");
  }
}
int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // Don't vectorize tiny trees. Small load/store chains or consecutive stores
  // of constants will be vectorized in SelectionDAG in MergeConsecutiveStores.
  // The SelectionDAG vectorizer can only handle pairs (trees of height = 2).
  if (VectorizableTree.size() < 3) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return 0;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << " .\n");
    Cost += C;
  }

  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}
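// A worked example with illustrative numbers: if a bundle of four adds costs
// 4 * 1 = 4 as scalars and 1 as a single vector add, getEntryCost() returns
// 1 - 4 = -3. getTreeCost() sums these per-bundle deltas and then adds the
// extractelement cost for every scalar that escapes the tree, so a negative
// total means vectorization is expected to pay off.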
int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}
AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // useful.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}
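// For example (illustrative): two i32 stores through %p and %p + 4 bytes
// strip to the same base with OffsetDelta == 4 == Size, so the same-pointer
// branch above proves consecutiveness directly. If the bases differ
// syntactically (say, two different GEPs), the SCEV path instead checks that
// PtrA + (Size - OffsetDelta) folds to exactly PtrB.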
Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  // Scan all of the instructions from SRC to DST and check if
  // the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}
int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}
void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}
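// As an illustrative sketch, Gather() below turns a bundle {a, b, c, d} into
// the chain
//   %v0 = insertelement <4 x ty> undef, ty %a, i32 0
//   %v1 = insertelement <4 x ty> %v0, ty %b, i32 1
//   ...
// recording each insertelement in GatherSeq so optimizeGatherSequence() can
// later hoist it out of loops or CSE identical sequences.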
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for ?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}
Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}
Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);
      Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
      E->VectorizedValue = NewPhi;

      // PHINodes may have multiple entries from the same block. We want to
      // visit every block once.
      SmallSet<BasicBlock*, 4> VisitedBBs;

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        BasicBlock *IBB = PH->getIncomingBlock(i);

        if (!VisitedBBs.insert(IBB)) {
          NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
          continue;
        }

        // Prepare the operand vector.
        for (unsigned j = 0; j < E->Scalars.size(); ++j)
          Operands.push_back(cast<PHINode>(E->Scalars[j])->
                             getIncomingValueForBlock(IBB));

        Builder.SetInsertPoint(IBB->getTerminator());
        Builder.SetCurrentDebugLocation(PH->getDebugLoc());
        Value *Vec = vectorizeTree(Operands);
        NewPhi->addIncoming(Vec, IBB);
      }

      assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
             "Invalid number of incoming values");
      return NewPhi;
    }

    case Instruction::ExtractElement: {
      if (CanReuseExtract(E->Scalars)) {
        Value *V = VL0->getOperand(0);
        E->VectorizedValue = V;
        return V;
      }
      return Gather(E->Scalars, VecTy);
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      ValueList INVL;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i)
        INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

      setInsertPointAfterBundle(E->Scalars);

      Value *InVec = vectorizeTree(INVL);

      if (Value *V = alreadyVectorized(E->Scalars))
        return V;

      CastInst *CI = cast<CastInst>(VL0);
      Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
      E->VectorizedValue = V;
      return V;
    }
    case Instruction::FCmp:
    case Instruction::ICmp: {
      ValueList LHSV, RHSV;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

      setInsertPointAfterBundle(E->Scalars);

      Value *L = vectorizeTree(LHSV);
      Value *R = vectorizeTree(RHSV);

      if (Value *V = alreadyVectorized(E->Scalars))
        return V;

      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Value *V;
      if (Opcode == Instruction::FCmp)
        V = Builder.CreateFCmp(P0, L, R);
      else
        V = Builder.CreateICmp(P0, L, R);

      E->VectorizedValue = V;
      return V;
    }
    case Instruction::Select: {
      ValueList TrueVec, FalseVec, CondVec;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
        FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
      }

      setInsertPointAfterBundle(E->Scalars);

      Value *Cond = vectorizeTree(CondVec);
      Value *True = vectorizeTree(TrueVec);
      Value *False = vectorizeTree(FalseVec);

      if (Value *V = alreadyVectorized(E->Scalars))
        return V;

      Value *V = Builder.CreateSelect(Cond, True, False);
      E->VectorizedValue = V;
      return V;
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      ValueList LHSVL, RHSVL;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

      setInsertPointAfterBundle(E->Scalars);

      Value *LHS = vectorizeTree(LHSVL);
      Value *RHS = vectorizeTree(RHSVL);

      if (LHS == RHS && isa<Instruction>(LHS)) {
        assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
      }

      if (Value *V = alreadyVectorized(E->Scalars))
        return V;

      BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
      Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
      E->VectorizedValue = V;
      return V;
    }
    case Instruction::Load: {
      // Loads are inserted at the head of the tree because we don't want to
      // sink them all the way down past store instructions.
      setInsertPointAfterBundle(E->Scalars);

      LoadInst *LI = cast<LoadInst>(VL0);
      unsigned AS = LI->getPointerAddressSpace();

      Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                            VecTy->getPointerTo(AS));
      unsigned Alignment = LI->getAlignment();
      LI = Builder.CreateLoad(VecPtr);
      LI->setAlignment(Alignment);
      E->VectorizedValue = LI;
      return LI;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(VL0);
      unsigned Alignment = SI->getAlignment();
      unsigned AS = SI->getPointerAddressSpace();

      ValueList ValueOp;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i)
        ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

      setInsertPointAfterBundle(E->Scalars);

      Value *VecValue = vectorizeTree(ValueOp);
      Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                            VecTy->getPointerTo(AS));
      StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
      S->setAlignment(Alignment);
      E->VectorizedValue = S;
      return S;
    }
    default:
      llvm_unreachable("unknown inst");
  }
  return 0;
}
Value *BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
        Scalar->use_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
      Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    } else if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (Value::use_iterator User = Scalar->use_begin(),
             UE = Scalar->use_end(); User != UE; ++User) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
          assert(!MustGather.count(*User) &&
                 "Replacing gathered value with undef");

          assert((ScalarToTreeEntry.count(*User) ||
                  // It is legal to replace the reduction users by undef.
                  (RdxOps && RdxOps->count(*User))) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}
void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
        << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
       e = GatherSeq.end(); it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallPtrSet<Instruction*, 16> Visited;
  SmallVector<Instruction*, 16> ToRemove;
  ReversePostOrderTraversal<Function*> RPOT(F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
       E = RPOT.end(); I != E; ++I) {
    BasicBlock *BB = *I;
    // For all instructions in the function:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      Instruction *In = it;
      if ((!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) ||
          !GatherSeq.count(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallPtrSet<Instruction*, 16>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          ToRemove.push_back(In);
          In = 0;
          break;
        }
      }
      if (In)
        Visited.insert(In);
    }
  }

  // Erase all of the instructions that we RAUWed.
  for (SmallVectorImpl<Instruction *>::iterator v = ToRemove.begin(),
       ve = ToRemove.end(); v != ve; ++v) {
    assert((*v)->getNumUses() == 0 && "Can't remove instructions with uses");
    (*v)->eraseFromParent();
  }
}
/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  virtual bool runOnFunction(Function &F) {
    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTree>();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom-up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.setPreservesCFG();
  }
private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores to their base objects to reduce the cost of the
  /// quadratic search on the stores. TODO: We can further reduce this cost
  /// if we flush the chain creation every time we run into a memory barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to start
  /// a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);

  StoreListMap StoreRefs;
};
bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
        << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;
    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
          << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}
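// For instance (illustrative): with 128-bit vector registers and i32 stores,
// Sz = 32 and VF = 128 / 32 = 4, so vectorizeStoreChain() above tries bundles
// of four consecutive stores starting at each offset in the chain.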
bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}
unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base of the GEP.
    Value *Ptr = SI->getPointerOperand();
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
      Ptr = GEP->getPointerOperand();

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}
bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}
bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;
    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing pair at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += OpsWidth - 1;
      Changed = true;
    }
  }

  return Changed;
}
bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}
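
// Worked example (illustrative): with VecLen = 4 and NumEltsToRdx = 2,
//   IsPairwise = true,  IsLeft = true   ->  <0, 2, undef, undef>
//   IsPairwise = true,  IsLeft = false  ->  <1, 3, undef, undef>
//   IsPairwise = false                  ->  <2, 3, undef, undef>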

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
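///
/// An illustrative IR fragment matching the tree above (an assumption, not
/// taken from the original comment):
///   %m0 = fmul fast float %a, %b
///   %m1 = fmul fast float %c, %d
///   %m2 = fmul fast float %e, %f
///   %m3 = fmul fast float %g, %h
///   %s0 = fadd fast float %m0, %m1
///   %s1 = fadd fast float %m2, %m3
///   %rdx = fadd fast float %s0, %s1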
class HorizontalReduction {
  SmallPtrSet<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 const DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.insert(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }
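
  // Illustrative walk-through: matched against the class-comment example,
  //   %rdx = fadd fast float %s0, %s1
  // the stack-based postorder pops both inner fadds into ReductionOps and the
  // four fmul leaves into ReducedVals, leaving ReducedValueOpcode ==
  // Instruction::FMul.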

  /// \brief Attempt to vectorize the tree found by matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, &ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }
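
  // Illustrative example: with ReduxWidth = 4 and 10 reduced values, the main
  // loop forms two 4-wide bundles (indices 0..3 and 4..7) and combines their
  // emitted reductions with a scalar 'bin.rdx' op; the tail values at indices
  // 8 and 9 are then folded in scalarly by the finishing loop.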

private:
  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    // The scalar baseline is ReduxWidth individual scalar operations.
    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }
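
  // Worked example (hypothetical costs): if the target reports a pairwise
  // reduction cost of 6, a splitting cost of 4, and a scalar add cost of 1
  // with ReduxWidth = 4, then VecReduxCost = 4 and ScalarReduxCost = 4 * 1,
  // so the returned delta is 0 and vectorization neither helps nor hurts.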

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
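
  // Illustrative splitting reduction for a <4 x float> %v (IR sketch, an
  // assumption rather than verbatim output):
  //   %s1 = shufflevector <4 x float> %v, <4 x float> undef,
  //                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %r1 = fadd <4 x float> %v, %s1
  //   %s2 = shufflevector <4 x float> %r1, <4 x float> undef,
  //                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %r2 = fadd <4 x float> %r1, %s2
  //   %res = extractelement <4 x float> %r2, i32 0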
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches.
static bool findBuildVector(InsertElementInst *IE,
                            SmallVectorImpl<Value *> &Ops) {
  if (!isa<UndefValue>(IE->getOperand(0)))
    return false;

  while (true) {
    Ops.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}
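
// For the insertelement chain in the comment above, Ops ends up holding
// {%s0, %s1, %s2, %s3}; vectorizeChainsInBlock then offers that list to
// tryToVectorizeList.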

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Instruction *, 16> VisitedInstrs;

  // Collect the incoming values from the PHIs.
  for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
       ++instr) {
    PHINode *P = dyn_cast<PHINode>(instr);
    if (!P)
      break;

    // We may go through BB multiple times so skip the one we have checked.
    if (!VisitedInstrs.insert(instr))
      continue;

    // Stop constructing the list when you reach a different type.
    if (Incoming.size() && P->getType() != Incoming[0]->getType()) {
      if (tryToVectorizeList(Incoming, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
        Changed = true;
        instr = BB->begin();
        ie = BB->end();
      }
      Incoming.clear();
    }

    Incoming.push_back(P);
  }

  if (Incoming.size() > 1)
    Changed |= tryToVectorizeList(Incoming, R);

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times so skip the one we have checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? P->getIncomingValue(0)
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            // We would like to start over since some instructions are deleted
            // and the iterator may be invalidated.
            Changed = true;
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }
  }

  return Changed;
}
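
// Illustrative example: a block that starts with three float PHIs followed by
// an i32 PHI gathers the float PHIs into Incoming; the type change at the i32
// PHI triggers a tryToVectorizeList attempt on the float group before a new
// list is started.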

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}
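
// Illustrative example: a group of 33 stores is examined as chunks of 16, 16
// and 1; the trailing single-store chunk is too short to pair and produces no
// vectorization.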

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
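
// Minimal usage sketch (an illustration assuming the legacy PassManager of
// this LLVM era, not part of this file):
//
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/Vectorize.h"
//
//   void runSLP(llvm::Module &M) {        // hypothetical helper
//     llvm::PassManager PM;
//     PM.add(llvm::createSLPVectorizerPass());
//     PM.run(M);
//   }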