//===--- Scalarizer.cpp - Scalarize vector operations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
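//
// For example (illustrative only), a single vector add such as
//
//   %res = add <2 x i32> %a, %b
//
// is rewritten into scalar adds on the extracted elements; if the vector
// result is still needed afterwards, it is recreated with insertelement
// instructions (see Scalarizer::finish below).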
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

namespace {
// Used to store the scattered form of a vector.
typedef SmallVector<Value *, 8> ValueVector;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
typedef std::map<Value *, ValueVector> ScatterMap;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
typedef SmallVector<std::pair<Instruction *, ValueVector *>, 16> GatherList;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
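// Components are created on demand by operator[] and, when a cache vector is
// supplied, remembered there so that later queries for the same value can
// reuse them.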
class Scatterer {
public:
  Scatterer() {}

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};
// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }
  FCmpInst &FCI;
};
// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }
  ICmpInst &ICI;
};
// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }
  BinaryOperator &BO;
};
// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() : VecTy(nullptr), ElemTy(nullptr), VecAlign(0), ElemSize(0) {}

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy;

  // The type of each element.
  Type *ElemTy;

  // The alignment of the vector.
  uint64_t VecAlign;

  // The size of each element.
  uint64_t ElemSize;
};
class Scalarizer : public FunctionPass,
                   public InstVisitor<Scalarizer, bool> {
public:
  static char ID;

  Scalarizer() : FunctionPass(ID) {
    initializeScalarizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &);
  bool visitFCmpInst(FCmpInst &);
  bool visitBinaryOperator(BinaryOperator &);
  bool visitGetElementPtrInst(GetElementPtrInst &);
  bool visitCastInst(CastInst &);
  bool visitBitCastInst(BitCastInst &);
  bool visitShuffleVectorInst(ShuffleVectorInst &);
  bool visitPHINode(PHINode &);
  bool visitLoadInst(LoadInst &);
  bool visitStoreInst(StoreInst &);

  static void registerOptions() {
    // This is disabled by default because having separate loads and stores
    // makes it more likely that the -combiner-alias-analysis limits will be
    // reached.
    OptionRegistry::registerOption<bool, Scalarizer,
                                   &Scalarizer::ScalarizeLoadStore>(
        "scalarize-load-store",
        "Allow the scalarizer pass to scalarize loads and stores", false);
  }

private:
  Scatterer scatter(Instruction *, Value *);
  void gather(Instruction *, const ValueVector &);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadata(Instruction *, const ValueVector &);
  bool getVectorLayout(Type *, unsigned, VectorLayout &, const DataLayout &);
  bool finish();

  template<typename T> bool splitBinary(Instruction &, const T &);

  ScatterMap Scattered;
  GatherList Gathered;
  unsigned ParallelLoopAccessMDKind;
  bool ScalarizeLoadStore;
};
char Scalarizer::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS_WITH_OPTIONS(Scalarizer, "scalarizer",
                             "Scalarize vector operations", false, false)

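// The scattered value V may be a vector or a pointer to a vector; in the
// pointer case, the component count is taken from the pointee vector type.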
Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

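// Pointer components are materialized by bitcasting the vector pointer to an
// element pointer and indexing it with constant GEPs; vector value components
// come from any insertelement chain that defines V, or failing that from a
// new extractelement.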
// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    if (!CV[0]) {
      Type *Ty =
        PointerType::get(PtrTy->getElementType()->getVectorElementType(),
                         PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    for (;;) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool Scalarizer::doInitialization(Module &M) {
  ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizeLoadStore =
      M.getContext().getOption<bool, Scalarizer, &Scalarizer::ScalarizeLoadStore>();
  return false;
}

bool Scalarizer::runOnFunction(Function &F) {
  assert(Gathered.empty() && Scattered.empty());
  for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
    BasicBlock *BB = BBI;
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = II;
      bool Done = visit(I);
      // Advance past I before possibly erasing it below.
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer Scalarizer::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point, V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void Scalarizer::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadata(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Instruction *Old = cast<Instruction>(SV[I]);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool Scalarizer::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (SmallVectorImpl<std::pair<unsigned, MDNode *>>::iterator
               MI = MDs.begin(),
               ME = MDs.end();
           MI != ME; ++MI)
        if (canTransferMetadata(MI->first))
          New->setMetadata(MI->first, MI->second);
      New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment,
                                 VectorLayout &Layout, const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (DL.getTypeSizeInBits(Layout.ElemTy) !=
      DL.getTypeStoreSizeInBits(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

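// As an illustration of the splitting performed below (names illustrative):
//   %r = fadd <2 x float> %x, %y
// becomes
//   %r.i0 = fadd float %x.i0, %y.i0
//   %r.i1 = fadd float %x.i1, %y.i1
// where %x.i0 etc. are the scattered components of the operands.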
// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(I.getParent(), &I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

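// Selects are split per element; a scalar (i1) condition is reused for every
// lane, while a vector condition is scattered like the other operands.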
bool Scalarizer::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(SI.getParent(), &SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool Scalarizer::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool Scalarizer::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool Scalarizer::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(GEPI.getParent(), &GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I)
    Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1));

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool Scalarizer::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(CI.getParent(), &CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

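// Bitcasts between vector types with different element counts are handled by
// fanning elements out or in. For example (illustrative), <2 x i64> -> <4 x i32>
// converts each i64 component to <2 x i32> and copies out its halves, while
// <4 x i32> -> <2 x i64> packs pairs of i32 components into <2 x i32> before
// converting each group to an i64.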
bool Scalarizer::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(BCI.getParent(), &BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool Scalarizer::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

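// PHIs are scalarized into one scalar PHI per element; each incoming vector
// value is scattered and its components are added as the corresponding
// incoming values.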
bool Scalarizer::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(PHI.getParent(), &PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

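// Loads and stores are only scalarized when the scalarize-load-store option
// is enabled, the access is simple, and the vector layout has full-byte
// elements (see getVectorLayout).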
bool Scalarizer::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(LI.getParent(), &LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool Scalarizer::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(SI.getParent(), &SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}

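// The rebuilt vectors produced below are chains of insertelements named
// <original>.upto0, <original>.upto1, ..., with the final link taking over
// the original instruction's name.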
// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool Scalarizer::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (GatherList::iterator GMI = Gathered.begin(), GME = Gathered.end();
       GMI != GME; ++GMI) {
    Instruction *Op = GMI->first;
    ValueVector &CV = *GMI->second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(BB, Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

FunctionPass *llvm::createScalarizerPass() {
  return new Scalarizer();
}