+// Legacy pass-manager registration. The DEPENDENCY entries declare the
+// analyses and transforms this pass requires (TTI, assumption cache, loop
+// info, loop-simplify/LCSSA canonical form, and scalar evolution) so the
+// pass manager schedules them before LoopUnroll runs.
+INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
+
+/// Factory for the loop-unroll pass. All four knobs are forwarded verbatim
+/// to the LoopUnroll pass object; the caller owns the returned pass.
+Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
+                                 int Runtime) {
+  Pass *UnrollPass = new LoopUnroll(Threshold, Count, AllowPartial, Runtime);
+  return UnrollPass;
+}
+
+/// Convenience factory: builds a LoopUnroll pass with the sentinel
+/// parameter values (-1, -1, 0, 0), i.e. no caller-specified overrides.
+Pass *llvm::createSimpleLoopUnrollPass() {
+  return llvm::createLoopUnrollPass(/*Threshold=*/-1, /*Count=*/-1,
+                                    /*AllowPartial=*/0, /*Runtime=*/0);
+}
+
+// Returns true iff V is a global variable that is constant and has a
+// definitive initializer - i.e. a load from V's memory can be replaced by a
+// value taken from that initializer.
+static bool isLoadFromConstantInitializer(Value *V) {
+  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+    if (GV->isConstant() && GV->hasDefinitiveInitializer())
+      // Make the Constant*-to-bool conversion explicit. Since
+      // hasDefinitiveInitializer() held, getInitializer() is non-null and
+      // this is always true here.
+      return GV->getInitializer() != nullptr;
+  return false;
+}
+
+// SCEV visitor, driven by SCEVTraversal, that inspects a load's address
+// expression and decides whether, after completely unrolling loop L, the
+// address would become a constant pointing into a constant global
+// variable's initializer (in which case the load could be eliminated).
+struct FindConstantPointers {
+  // Still true if the load may resolve to a value from a constant
+  // initializer; finalized when the SCEVUnknown leaf is reached.
+  bool LoadCanBeConstantFolded;
+  // Still true if every address component seen so far is constant across
+  // the iterations of L; cleared permanently on the first blocker.
+  bool IndexIsConstant;
+  // Constant per-iteration stride of the address AddRec, when found.
+  APInt Step;
+  // NOTE(review): StartValue is never assigned anywhere in this visitor -
+  // confirm whether it is dead or filled in elsewhere.
+  APInt StartValue;
+  // The SCEVUnknown leaf of the address expression: the base pointer.
+  Value *BaseAddress;
+  const Loop *L;
+  ScalarEvolution &SE;
+  FindConstantPointers(const Loop *loop, ScalarEvolution &SE)
+      : LoadCanBeConstantFolded(true), IndexIsConstant(true), L(loop), SE(SE) {}
+
+  // Called by SCEVTraversal for every SCEV node; returning false stops the
+  // descent into this node's operands.
+  bool follow(const SCEV *S) {
+    if (const SCEVUnknown *SC = dyn_cast<SCEVUnknown>(S)) {
+      // We've reached the leaf node of SCEV, it's most probably just a
+      // variable. Now it's time to see if it corresponds to a constant
+      // global (in which case we can eliminate the load), or not.
+      BaseAddress = SC->getValue();
+      LoadCanBeConstantFolded =
+          IndexIsConstant && isLoadFromConstantInitializer(BaseAddress);
+      return false;
+    }
+    if (isa<SCEVConstant>(S))
+      return true;
+    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+      // If the current SCEV expression is AddRec, and its loop isn't the loop
+      // we are about to unroll, then we won't get a constant address after
+      // unrolling, and thus, won't be able to eliminate the load.
+      if (AR->getLoop() != L)
+        return IndexIsConstant = false;
+      // If the step isn't constant, we won't get constant addresses in unrolled
+      // version. Bail out.
+      if (const SCEVConstant *StepSE =
+              dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
+        Step = StepSE->getValue()->getValue();
+      else
+        return IndexIsConstant = false;
+
+      return IndexIsConstant;
+    }
+    // If Result is true, continue traversal.
+    // Otherwise, we have found something that prevents us from (possible) load
+    // elimination.
+    return IndexIsConstant;
+  }
+  // Stop the traversal early once a non-constant component has been found.
+  bool isDone() const { return !IndexIsConstant; }
+};
+
+// This class is used to get an estimate of the optimization effects that we
+// could get from complete loop unrolling. It comes from the fact that some
+// loads might be replaced with concrete constant values and that could trigger
+// a chain of instruction simplifications.
+//
+// E.g. we might have:
+// int a[] = {0, 1, 0};
+// v = 0;
+// for (i = 0; i < 3; i ++)
+// v += b[i]*a[i];
+// If we completely unroll the loop, we would get:
+// v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
+// Which then will be simplified to:
+// v = b[0]* 0 + b[1]* 1 + b[2]* 0
+// And finally:
+// v = b[1]
+class UnrollAnalyzer : public InstVisitor<UnrollAnalyzer, bool> {
+  typedef InstVisitor<UnrollAnalyzer, bool> Base;
+  friend class InstVisitor<UnrollAnalyzer, bool>;
+
+  // The loop being analyzed for complete unrolling.
+  const Loop *L;
+  // Trip count of L, set by the constructor. NOTE(review): not read by any
+  // method visible in this chunk - confirm it is used further down.
+  unsigned TripCount;
+  ScalarEvolution &SE;
+  const TargetTransformInfo &TTI;
+
+  // Maps an instruction to the constant it simplifies to on the iteration
+  // currently being analyzed.
+  DenseMap<Value *, Constant *> SimplifiedValues;
+  // Loads that become constant-foldable after unrolling, mapped to the base
+  // address of the constant global they read from.
+  DenseMap<LoadInst *, Value *> LoadBaseAddresses;
+  // Instructions already accounted for in NumberOfOptimizedInstructions,
+  // so each one's cost is counted at most once.
+  SmallPtrSet<Instruction *, 32> CountedInstructions;
+
+  /// \brief Count the number of optimized instructions.
+  unsigned NumberOfOptimizedInstructions;
+
+  // Base case for the InstVisitor dispatch: any instruction kind we don't
+  // explicitly handle is conservatively treated as non-simplifiable.
+  // (Dropped the stray ';' after the body - it was an empty declaration.)
+  bool visitInstruction(Instruction &I) { return false; }
+  // TODO: We should also visit ICmp, FCmp, GetElementPtr, Trunc, ZExt, SExt,
+  // FPTrunc, FPExt, FPToUI, FPToSI, UIToFP, SIToFP, BitCast, Select,
+  // ExtractElement, InsertElement, ShuffleVector, ExtractValue, InsertValue.
+  //
+  // Probably it's worth hoisting the code for estimating the simplification
+  // effects to a separate class, since we have very similar code in
+  // InlineCost already.
+  // Try to simplify a binary operator whose operands may have been replaced
+  // by constants on the current iteration. Returns true iff the instruction
+  // folded to a constant (which is then recorded in SimplifiedValues).
+  bool visitBinaryOperator(BinaryOperator &I) {
+    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+    // Substitute operands already known to be constant on this iteration.
+    if (!isa<Constant>(LHS))
+      if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+        LHS = SimpleLHS;
+    if (!isa<Constant>(RHS))
+      if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+        RHS = SimpleRHS;
+    Value *SimpleV = nullptr;
+    // Floating-point operations must be simplified under the instruction's
+    // own fast-math flags.
+    if (auto FI = dyn_cast<FPMathOperator>(&I))
+      SimpleV =
+          SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags());
+    else
+      SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS);
+
+    // Count this instruction's cost once, even if it is revisited later.
+    if (SimpleV && CountedInstructions.insert(&I).second)
+      NumberOfOptimizedInstructions += TTI.getUserCost(&I);
+
+    // Only a constant result is useful for further propagation; a
+    // non-constant simplification still counts above but is not recorded.
+    if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
+      SimplifiedValues[&I] = C;
+      return true;
+    }
+    return false;
+  }
+
+  // Compute the constant value LI would load on the given (0-based) unrolled
+  // Iteration, or nullptr if it cannot be determined. LI is expected to have
+  // been registered in LoadBaseAddresses (see findConstFoldableLoads).
+  Constant *computeLoadValue(LoadInst *LI, unsigned Iteration) {
+    if (!LI)
+      return nullptr;
+    Value *BaseAddr = LoadBaseAddresses[LI];
+    if (!BaseAddr)
+      return nullptr;
+
+    auto GV = dyn_cast<GlobalVariable>(BaseAddr);
+    if (!GV)
+      return nullptr;
+
+    // Only simple array-like initializers (ConstantDataSequential) are
+    // handled; anything else (structs, zeroinitializer, ...) bails out.
+    ConstantDataSequential *CDS =
+        dyn_cast<ConstantDataSequential>(GV->getInitializer());
+    if (!CDS)
+      return nullptr;
+
+    // Byte offset of the loaded address relative to the global's base,
+    // expressed as a SCEV.
+    const SCEV *BaseAddrSE = SE.getSCEV(BaseAddr);
+    const SCEV *S = SE.getSCEV(LI->getPointerOperand());
+    const SCEV *OffSE = SE.getMinusSCEV(S, BaseAddrSE);
+
+    APInt StepC, StartC;
+    // The offset must be a {Start,+,Step} recurrence with constant Start and
+    // Step so it can be evaluated at a concrete iteration number.
+    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(OffSE);
+    if (!AR)
+      return nullptr;
+
+    if (const SCEVConstant *StepSE =
+            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
+      StepC = StepSE->getValue()->getValue();
+    else
+      return nullptr;
+
+    if (const SCEVConstant *StartSE = dyn_cast<SCEVConstant>(AR->getStart()))
+      StartC = StartSE->getValue()->getValue();
+    else
+      return nullptr;
+
+    // Translate the byte offset at this iteration into an element index.
+    // NOTE(review): assumes Start and Step fit in 'unsigned' and are
+    // non-negative, and that offsets are element-aligned - confirm. ElemSize
+    // is nonzero for CDS element types (smallest is 8 bits), so no div-by-0.
+    unsigned ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
+    unsigned Start = StartC.getLimitedValue();
+    unsigned Step = StepC.getLimitedValue();
+
+    unsigned Index = (Start + Step * Iteration) / ElemSize;
+    if (Index >= CDS->getNumElements())
+      return nullptr;
+
+    Constant *CV = CDS->getElementAsConstant(Index);
+
+    return CV;
+  }
+
+public:
+  // Construction only records the analysis context; the actual work happens
+  // in findConstFoldableLoads() and the estimate*() methods below.
+  UnrollAnalyzer(const Loop *L, unsigned TripCount, ScalarEvolution &SE,
+                 const TargetTransformInfo &TTI)
+      : L(L), TripCount(TripCount), SE(SE), TTI(TTI),
+        NumberOfOptimizedInstructions(0) {}
+
+  // Scan every simple load in loop L and, for those whose address becomes a
+  // constant into a known-constant global initializer once the loop is fully
+  // unrolled, remember the base address in LoadBaseAddresses. That map later
+  // drives the estimate of potentially simplified instructions.
+  void findConstFoldableLoads() {
+    for (auto BB : L->getBlocks()) {
+      for (Instruction &Inst : *BB) {
+        LoadInst *Load = dyn_cast<LoadInst>(&Inst);
+        // Skip non-loads and volatile/atomic loads.
+        if (!Load || !Load->isSimple())
+          continue;
+        const SCEV *AddrSCEV = SE.getSCEV(Load->getPointerOperand());
+        FindConstantPointers Checker(L, SE);
+        SCEVTraversal<FindConstantPointers> Traversal(Checker);
+        Traversal.visitAll(AddrSCEV);
+        if (Checker.IndexIsConstant && Checker.LoadCanBeConstantFolded)
+          LoadBaseAddresses[Load] = Checker.BaseAddress;
+      }
+    }
+  }
+
+  // For the given unrolled Iteration, substitute the concrete values of all
+  // constant-foldable loads (LoadBaseAddresses) and propagate the resulting
+  // simplifications through their users, accumulating the cost of every
+  // instruction that would fold away. SimplifiedValues is (re)filled for
+  // later use in the dead-code estimation.
+  unsigned estimateNumberOfSimplifiedInstructions(unsigned Iteration) {
+    SmallVector<Instruction *, 8> Worklist;
+    SimplifiedValues.clear();
+    CountedInstructions.clear();
+    NumberOfOptimizedInstructions = 0;
+
+    // Helper: queue every in-loop instruction user of I for a visit.
+    auto EnqueueUsers = [&](Instruction *I) {
+      for (User *U : I->users())
+        if (Instruction *UI = dyn_cast<Instruction>(U))
+          if (L->contains(UI))
+            Worklist.push_back(UI);
+    };
+
+    // Seed: record each load's concrete value for this iteration, count its
+    // cost once, and queue its users.
+    for (auto &LoadDescr : LoadBaseAddresses) {
+      LoadInst *LI = LoadDescr.first;
+      SimplifiedValues[LI] = computeLoadValue(LI, Iteration);
+      if (CountedInstructions.insert(LI).second)
+        NumberOfOptimizedInstructions += TTI.getUserCost(LI);
+      EnqueueUsers(LI);
+    }
+
+    // Propagate: whenever an instruction simplifies, its users may simplify
+    // too, so push them as well.
+    while (!Worklist.empty()) {
+      Instruction *I = Worklist.pop_back_val();
+      if (visit(I))
+        EnqueueUsers(I);
+    }
+    return NumberOfOptimizedInstructions;
+  }
+
+ // Given a list of potentially simplifed instructions, estimate number of
+ // instructions that would become dead if we do perform the simplification.
+ unsigned estimateNumberOfDeadInstructions() {
+ NumberOfOptimizedInstructions = 0;
+
+ // We keep a set vector for the worklist so that we don't wast space in the
+ // worklist queuing up the same instruction repeatedly. This can happen due
+ // to multiple operands being the same instruction or due to the same
+ // instruction being an operand of lots of things that end up dead or
+ // simplified.
+ SmallSetVector<Instruction *, 8> Worklist;
+
+ // The dead instructions are held in a separate set. This is used to
+ // prevent us from re-examining instructions and make sure we only count
+ // the benifit once. The worklist's internal set handles insertion
+ // deduplication.
+ SmallPtrSet<Instruction *, 16> DeadInstructions;
+
+ // Lambda to enque operands onto the worklist.
+ auto EnqueueOperands = [&](Instruction &I) {
+ for (auto *Op : I.operand_values())
+ if (auto *OpI = dyn_cast<Instruction>(Op))
+ Worklist.insert(OpI);
+ };