1 //===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
14 // The process of unrolling can produce extraneous basic blocks linked with
15 // unconditional branches. This will be corrected in the future.
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Transforms/Utils/UnrollLoop.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AssumptionTracker.h"
23 #include "llvm/Analysis/InstructionSimplify.h"
24 #include "llvm/Analysis/LoopIterator.h"
25 #include "llvm/Analysis/LoopPass.h"
26 #include "llvm/Analysis/ScalarEvolution.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/LLVMContext.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
35 #include "llvm/Transforms/Utils/Cloning.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include "llvm/Transforms/Utils/LoopUtils.h"
38 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
41 #define DEBUG_TYPE "loop-unroll"
43 // TODO: Should these be here or in LoopUnroll?
44 STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
45 STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
47 /// RemapInstruction - Convert the instruction operands from referencing the
48 /// current values into those specified by VMap.
49 static inline void RemapInstruction(Instruction *I,
50 ValueToValueMapTy &VMap) {
51 for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
52 Value *Op = I->getOperand(op);
53 ValueToValueMapTy::iterator It = VMap.find(Op);
55 I->setOperand(op, It->second);
58 if (PHINode *PN = dyn_cast<PHINode>(I)) {
59 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
60 ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
62 PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
67 /// FoldBlockIntoPredecessor - Folds a basic block into its predecessor if it
68 /// only has one predecessor, and that predecessor only has one successor.
69 /// The LoopInfo Analysis that is passed will be kept consistent. If folding is
70 /// successful references to the containing loop must be removed from
71 /// ScalarEvolution by calling ScalarEvolution::forgetLoop because SE may have
72 /// references to the eliminated BB. The argument ForgottenLoops contains a set
73 /// of loops that have already been forgotten to prevent redundant, expensive
74 /// calls to ScalarEvolution::forgetLoop. Returns the new combined block.
76 FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI, LPPassManager *LPM,
77 SmallPtrSetImpl<Loop *> &ForgottenLoops) {
78 // Merge basic blocks into their predecessor if there is only one distinct
79 // pred, and if there is only one distinct successor of the predecessor, and
80 // if there are no PHI nodes.
81 BasicBlock *OnlyPred = BB->getSinglePredecessor();
82 if (!OnlyPred) return nullptr;
84 if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
87 DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);
89 // Resolve any PHI nodes at the start of the block. They are all
90 // guaranteed to have exactly one entry if they exist, unless there are
91 // multiple duplicate (but guaranteed to be equal) entries for the
92 // incoming edges. This occurs when there are multiple edges from
93 // OnlyPred to OnlySucc.
94 FoldSingleEntryPHINodes(BB);
96 // Delete the unconditional branch from the predecessor...
97 OnlyPred->getInstList().pop_back();
99 // Make all PHI nodes that referred to BB now refer to Pred as their
101 BB->replaceAllUsesWith(OnlyPred);
103 // Move all definitions in the successor to the predecessor...
104 OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());
106 // OldName will be valid until erased.
107 StringRef OldName = BB->getName();
109 // Erase basic block from the function...
111 // ScalarEvolution holds references to loop exit blocks.
113 if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
114 if (Loop *L = LI->getLoopFor(BB)) {
115 if (ForgottenLoops.insert(L))
122 // Inherit predecessor's name if it exists...
123 if (!OldName.empty() && !OnlyPred->hasName())
124 OnlyPred->setName(OldName);
126 BB->eraseFromParent();
131 /// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
132 /// if unrolling was successful, or false if the loop was unmodified. Unrolling
133 /// can only fail when the loop's latch block is not terminated by a conditional
134 /// branch instruction. However, if the trip count (and multiple) are not known,
135 /// loop unrolling will mostly produce more code that is no faster.
137 /// TripCount is generally defined as the number of times the loop header
138 /// executes. UnrollLoop relaxes the definition to permit early exits: here
139 /// TripCount is the iteration on which control exits LatchBlock if no early
140 /// exits were taken. Note that UnrollLoop assumes that the loop counter test
141 /// terminates LatchBlock in order to remove unnecesssary instances of the
142 /// test. In other words, control may exit the loop prior to TripCount
143 /// iterations via an early branch, but control may not exit the loop from the
144 /// LatchBlock's terminator prior to TripCount iterations.
146 /// Similarly, TripMultiple divides the number of times that the LatchBlock may
147 /// execute without exiting the loop.
149 /// The LoopInfo Analysis that is passed will be kept consistent.
151 /// If a LoopPassManager is passed in, and the loop is fully removed, it will be
152 /// removed from the LoopPassManager as well. LPM can also be NULL.
154 /// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
155 /// available from the Pass it must also preserve those analyses.
156 bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
157 bool AllowRuntime, unsigned TripMultiple,
158 LoopInfo *LI, Pass *PP, LPPassManager *LPM,
159 AssumptionTracker *AT) {
160 BasicBlock *Preheader = L->getLoopPreheader();
162 DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
166 BasicBlock *LatchBlock = L->getLoopLatch();
168 DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
172 // Loops with indirectbr cannot be cloned.
173 if (!L->isSafeToClone()) {
174 DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
178 BasicBlock *Header = L->getHeader();
179 BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
181 if (!BI || BI->isUnconditional()) {
182 // The loop-rotate pass can be helpful to avoid this in many cases.
184 " Can't unroll; loop not terminated by a conditional branch.\n");
188 if (Header->hasAddressTaken()) {
189 // The loop-rotate pass can be helpful to avoid this in many cases.
191 " Won't unroll loop: address of header block is taken.\n");
196 DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
197 if (TripMultiple != 1)
198 DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");
200 // Effectively "DCE" unrolled iterations that are beyond the tripcount
201 // and will never be executed.
202 if (TripCount != 0 && Count > TripCount)
205 // Don't enter the unroll code if there is nothing to do. This way we don't
206 // need to support "partial unrolling by 1".
207 if (TripCount == 0 && Count < 2)
211 assert(TripMultiple > 0);
212 assert(TripCount == 0 || TripCount % TripMultiple == 0);
214 // Are we eliminating the loop control altogether?
215 bool CompletelyUnroll = Count == TripCount;
217 // We assume a run-time trip count if the compiler cannot
218 // figure out the loop trip count and the unroll-runtime
219 // flag is specified.
220 bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);
222 if (RuntimeTripCount && !UnrollRuntimeLoopProlog(L, Count, LI, LPM))
225 // Notify ScalarEvolution that the loop will be substantially changed,
226 // if not outright eliminated.
228 ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
233 // If we know the trip count, we know the multiple...
234 unsigned BreakoutTrip = 0;
235 if (TripCount != 0) {
236 BreakoutTrip = TripCount % Count;
239 // Figure out what multiple to use.
240 BreakoutTrip = TripMultiple =
241 (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
244 // Report the unrolling decision.
245 DebugLoc LoopLoc = L->getStartLoc();
246 Function *F = Header->getParent();
247 LLVMContext &Ctx = F->getContext();
249 if (CompletelyUnroll) {
250 DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
251 << " with trip count " << TripCount << "!\n");
252 emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
253 Twine("completely unrolled loop with ") +
254 Twine(TripCount) + " iterations");
256 auto EmitDiag = [&](const Twine &T) {
257 emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
258 "unrolled loop by a factor of " + Twine(Count) +
262 DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
264 if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
265 DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
266 EmitDiag(" with a breakout at trip " + Twine(BreakoutTrip));
267 } else if (TripMultiple != 1) {
268 DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
269 EmitDiag(" with " + Twine(TripMultiple) + " trips per branch");
270 } else if (RuntimeTripCount) {
271 DEBUG(dbgs() << " with run-time trip count");
272 EmitDiag(" with run-time trip count");
274 DEBUG(dbgs() << "!\n");
277 bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
278 BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);
280 // For the first iteration of the loop, we should use the precloned values for
281 // PHI nodes. Insert associations now.
282 ValueToValueMapTy LastValueMap;
283 std::vector<PHINode*> OrigPHINode;
284 for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
285 OrigPHINode.push_back(cast<PHINode>(I));
288 std::vector<BasicBlock*> Headers;
289 std::vector<BasicBlock*> Latches;
290 Headers.push_back(Header);
291 Latches.push_back(LatchBlock);
293 // The current on-the-fly SSA update requires blocks to be processed in
294 // reverse postorder so that LastValueMap contains the correct value at each
296 LoopBlocksDFS DFS(L);
299 // Stash the DFS iterators before adding blocks to the loop.
300 LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
301 LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();
303 for (unsigned It = 1; It != Count; ++It) {
304 std::vector<BasicBlock*> NewBlocks;
306 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
307 ValueToValueMapTy VMap;
308 BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
309 Header->getParent()->getBasicBlockList().push_back(New);
311 // Loop over all of the PHI nodes in the block, changing them to use the
312 // incoming values from the previous block.
314 for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
315 PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
316 Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
317 if (Instruction *InValI = dyn_cast<Instruction>(InVal))
318 if (It > 1 && L->contains(InValI))
319 InVal = LastValueMap[InValI];
320 VMap[OrigPHINode[i]] = InVal;
321 New->getInstList().erase(NewPHI);
324 // Update our running map of newest clones
325 LastValueMap[*BB] = New;
326 for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
328 LastValueMap[VI->first] = VI->second;
330 L->addBasicBlockToLoop(New, LI->getBase());
332 // Add phi entries for newly created values to all exit blocks.
333 for (succ_iterator SI = succ_begin(*BB), SE = succ_end(*BB);
335 if (L->contains(*SI))
337 for (BasicBlock::iterator BBI = (*SI)->begin();
338 PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
339 Value *Incoming = phi->getIncomingValueForBlock(*BB);
340 ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
341 if (It != LastValueMap.end())
342 Incoming = It->second;
343 phi->addIncoming(Incoming, New);
346 // Keep track of new headers and latches as we create them, so that
347 // we can insert the proper branches later.
349 Headers.push_back(New);
350 if (*BB == LatchBlock)
351 Latches.push_back(New);
353 NewBlocks.push_back(New);
356 // Remap all instructions in the most recent iteration
357 for (unsigned i = 0; i < NewBlocks.size(); ++i)
358 for (BasicBlock::iterator I = NewBlocks[i]->begin(),
359 E = NewBlocks[i]->end(); I != E; ++I)
360 ::RemapInstruction(I, LastValueMap);
363 // Loop over the PHI nodes in the original block, setting incoming values.
364 for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
365 PHINode *PN = OrigPHINode[i];
366 if (CompletelyUnroll) {
367 PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
368 Header->getInstList().erase(PN);
370 else if (Count > 1) {
371 Value *InVal = PN->removeIncomingValue(LatchBlock, false);
372 // If this value was defined in the loop, take the value defined by the
373 // last iteration of the loop.
374 if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
375 if (L->contains(InValI))
376 InVal = LastValueMap[InVal];
378 assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
379 PN->addIncoming(InVal, Latches.back());
383 // Now that all the basic blocks for the unrolled iterations are in place,
384 // set up the branches to connect them.
385 for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
386 // The original branch was replicated in each unrolled iteration.
387 BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
389 // The branch destination.
390 unsigned j = (i + 1) % e;
391 BasicBlock *Dest = Headers[j];
392 bool NeedConditional = true;
394 if (RuntimeTripCount && j != 0) {
395 NeedConditional = false;
398 // For a complete unroll, make the last iteration end with a branch
399 // to the exit block.
400 if (CompletelyUnroll && j == 0) {
402 NeedConditional = false;
405 // If we know the trip count or a multiple of it, we can safely use an
406 // unconditional branch for some iterations.
407 if (j != BreakoutTrip && (TripMultiple == 0 || j % TripMultiple != 0)) {
408 NeedConditional = false;
411 if (NeedConditional) {
412 // Update the conditional branch's successor for the following
414 Term->setSuccessor(!ContinueOnTrue, Dest);
416 // Remove phi operands at this loop exit
417 if (Dest != LoopExit) {
418 BasicBlock *BB = Latches[i];
419 for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
421 if (*SI == Headers[i])
423 for (BasicBlock::iterator BBI = (*SI)->begin();
424 PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
425 Phi->removeIncomingValue(BB, false);
429 // Replace the conditional branch with an unconditional one.
430 BranchInst::Create(Dest, Term);
431 Term->eraseFromParent();
435 // Merge adjacent basic blocks, if possible.
436 SmallPtrSet<Loop *, 4> ForgottenLoops;
437 for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
438 BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
439 if (Term->isUnconditional()) {
440 BasicBlock *Dest = Term->getSuccessor(0);
441 if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM,
443 std::replace(Latches.begin(), Latches.end(), Dest, Fold);
447 // FIXME: We could register any cloned assumptions instead of clearing the
448 // whole function's cache.
449 AT->forgetCachedAssumptions(F);
451 DominatorTree *DT = nullptr;
453 // FIXME: Reconstruct dom info, because it is not preserved properly.
454 // Incrementally updating domtree after loop unrolling would be easy.
455 if (DominatorTreeWrapperPass *DTWP =
456 PP->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
457 DT = &DTWP->getDomTree();
458 DT->recalculate(*L->getHeader()->getParent());
461 // Simplify any new induction variables in the partially unrolled loop.
462 ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
463 if (SE && !CompletelyUnroll) {
464 SmallVector<WeakVH, 16> DeadInsts;
465 simplifyLoopIVs(L, SE, LPM, DeadInsts);
467 // Aggressively clean up dead instructions that simplifyLoopIVs already
468 // identified. Any remaining should be cleaned up below.
469 while (!DeadInsts.empty())
470 if (Instruction *Inst =
471 dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
472 RecursivelyDeleteTriviallyDeadInstructions(Inst);
475 // At this point, the code is well formed. We now do a quick sweep over the
476 // inserted code, doing constant propagation and dead code elimination as we
478 const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
479 for (std::vector<BasicBlock*>::const_iterator BB = NewLoopBlocks.begin(),
480 BBE = NewLoopBlocks.end(); BB != BBE; ++BB)
481 for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E; ) {
482 Instruction *Inst = I++;
484 if (isInstructionTriviallyDead(Inst))
485 (*BB)->getInstList().erase(Inst);
486 else if (Value *V = SimplifyInstruction(Inst))
487 if (LI->replacementPreservesLCSSAForm(Inst, V)) {
488 Inst->replaceAllUsesWith(V);
489 (*BB)->getInstList().erase(Inst);
493 NumCompletelyUnrolled += CompletelyUnroll;
496 Loop *OuterL = L->getParentLoop();
497 // Remove the loop from the LoopPassManager if it's completely removed.
498 if (CompletelyUnroll && LPM != nullptr)
499 LPM->deleteLoopFromQueue(L);
501 // If we have a pass and a DominatorTree we should re-simplify impacted loops
502 // to ensure subsequent analyses can rely on this form. We want to simplify
503 // at least one layer outside of the loop that was unrolled so that any
504 // changes to the parent loop exposed by the unrolling are considered.
506 if (!OuterL && !CompletelyUnroll)
509 DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
510 const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
511 ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
512 simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL, AT);
514 // LCSSA must be performed on the outermost affected loop. The unrolled
515 // loop's last loop latch is guaranteed to be in the outermost loop after
516 // deleteLoopFromQueue updates LoopInfo.
517 Loop *LatchLoop = LI->getLoopFor(Latches.back());
518 if (!OuterL->contains(LatchLoop))
519 while (OuterL->getParentLoop() != LatchLoop)
520 OuterL = OuterL->getParentLoop();
522 formLCSSARecursively(*OuterL, *DT, SE);