1 //===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
9 // The InductiveRangeCheckElimination pass splits a loop's iteration space into
10 // three disjoint ranges. It does that in a way such that the code running in
11 // the middle loop provably does not need range checks. As an example, it will
14 // len = < known positive >
15 // for (i = 0; i < n; i++) {
16 // if (0 <= i && i < len) {
19 // throw_out_of_bounds();
25 // len = < known positive >
26 // limit = smin(n, len)
27 // // no first segment
28 // for (i = 0; i < limit; i++) {
29 // if (0 <= i && i < len) { // this check is fully redundant
32 // throw_out_of_bounds();
35 // for (i = limit; i < n; i++) {
36 // if (0 <= i && i < len) {
39 // throw_out_of_bounds();
42 //===----------------------------------------------------------------------===//
44 #include "llvm/ADT/Optional.h"
46 #include "llvm/Analysis/InstructionSimplify.h"
47 #include "llvm/Analysis/LoopInfo.h"
48 #include "llvm/Analysis/LoopPass.h"
49 #include "llvm/Analysis/ScalarEvolution.h"
50 #include "llvm/Analysis/ScalarEvolutionExpander.h"
51 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
52 #include "llvm/Analysis/ValueTracking.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/Function.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IRBuilder.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/ValueHandle.h"
61 #include "llvm/IR/Verifier.h"
63 #include "llvm/Support/Debug.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Cloning.h"
68 #include "llvm/Transforms/Utils/LoopUtils.h"
69 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
70 #include "llvm/Transforms/Utils/UnrollLoop.h"
72 #include "llvm/Pass.h"
// Command-line knobs for the IRCE pass.
// NOTE(review): the cl::init/cl::desc arguments are elided from this listing;
// confirm the defaults against the full source.
78 cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
81 cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
84 #define DEBUG_TYPE "irce"
88 /// An inductive range check is conditional branch in a loop with
90 /// 1. a very cold successor (i.e. the branch jumps to that successor very
95 /// 2. a condition that is provably true for some range of values taken by the
96 /// containing loop's induction variable.
98 /// Currently all inductive range checks are branches conditional on an
99 /// expression of the form
101 /// 0 <= (Offset + Scale * I) < Length
103 /// where `I' is the canonical induction variable of a loop to which Offset and
104 /// Scale are loop invariant, and Length is >= 0. Currently the 'false' branch
105 /// is considered cold, looking at profiling data to verify that is a TODO.
// Describes one range check (a conditional branch guarding 0 <= Offset +
// Scale * CIV < Length) found inside a loop. Instances are allocated from a
// SpecificBumpPtrAllocator (see AllocatorTy) by the static `create' factory.
// NOTE(review): the member declarations (Offset/Scale/Length/Branch) and some
// method bodies are elided from this listing.
107 class InductiveRangeCheck {
// Default-construct with all fields null; `create' fills them in.
113 InductiveRangeCheck() :
114 Offset(nullptr), Scale(nullptr), Length(nullptr), Branch(nullptr) { }
// Read-only accessors for the decomposed range-check expression.
117 const SCEV *getOffset() const { return Offset; }
118 const SCEV *getScale() const { return Scale; }
119 Value *getLength() const { return Length; }
// Pretty-print this range check (for debugging).
121 void print(raw_ostream &OS) const {
122 OS << "InductiveRangeCheck:\n";
130 getBranch()->print(OS);
133 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
139 BranchInst *getBranch() const { return Branch; }
141 /// Represents an integer range [Range.first, Range.second). If Range.second
142 /// < Range.first, then the value denotes the empty range.
143 typedef std::pair<Value *, Value *> Range;
144 typedef SpecificBumpPtrAllocator<InductiveRangeCheck> AllocatorTy;
146 /// This is the value the condition of the branch needs to evaluate to for the
147 /// branch to take the hot successor (see (1) above).
148 bool getPassingDirection() { return true; }
150 /// Computes a range for the induction variable in which the range check is
151 /// redundant and can be constant-folded away.
152 Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
153 IRBuilder<> &B) const;
155 /// Create an inductive range check out of BI if possible, else return
// (continuation elided; presumably "nullptr" — confirm in full source)
157 static InductiveRangeCheck *create(AllocatorTy &Alloc, BranchInst *BI,
158 Loop *L, ScalarEvolution &SE);
// The legacy LoopPass driver for IRCE. Owns the bump allocator that backs all
// InductiveRangeCheck objects created while processing a loop.
161 class InductiveRangeCheckElimination : public LoopPass {
162 InductiveRangeCheck::AllocatorTy Allocator;
166 InductiveRangeCheckElimination() : LoopPass(ID) {
167 initializeInductiveRangeCheckEliminationPass(
168 *PassRegistry::getPassRegistry());
// Declare analysis dependencies: loops must be in loop-simplify and LCSSA
// form, and ScalarEvolution must be available.
171 void getAnalysisUsage(AnalysisUsage &AU) const override {
172 AU.addRequired<LoopInfoWrapperPass>();
173 AU.addRequiredID(LoopSimplifyID);
174 AU.addRequiredID(LCSSAID);
175 AU.addRequired<ScalarEvolution>();
// Per-loop entry point (defined below, after LoopConstrainer).
178 bool runOnLoop(Loop *L, LPPassManager &LPM) override;
// Pass identification and registration with the legacy pass manager.
181 char InductiveRangeCheckElimination::ID = 0;
184 INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
185 "Inductive range check elimination", false, false)
// Returns true if `Check' is a comparison establishing a lower bound of zero
// on some value (e.g. "V sge 0" or "V sgt -1"), and sets IndexV to that value.
// NOTE(review): the switch header, canonicalization and return statements are
// elided from this listing; semantics inferred from the visible cases only.
187 static bool IsLowerBoundCheck(Value *Check, Value *&IndexV) {
188 using namespace llvm::PatternMatch;
190 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
191 Value *LHS = nullptr, *RHS = nullptr;
// Bail unless `Check' is an icmp at all.
193 if (!match(Check, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
200 case ICmpInst::ICMP_SLE:
// "V sge 0" form: the bound must literally be the constant zero.
203 case ICmpInst::ICMP_SGE:
204 if (!match(RHS, m_ConstantInt<0>()))
209 case ICmpInst::ICMP_SLT:
// "V sgt -1" form: equivalent to V >= 0 in signed arithmetic.
212 case ICmpInst::ICMP_SGT:
213 if (!match(RHS, m_ConstantInt<-1>()))
// Returns true if `Check' is a comparison bounding `Index' from above (signed
// or unsigned slt/ugt forms), and sets UpperLimit to the bound. An unsigned
// comparison also implies the 0 <= Index half of the range check.
// NOTE(review): the switch header, the m_Specific(Index) matching and the
// return statements are elided from this listing.
220 static bool IsUpperBoundCheck(Value *Check, Value *Index, Value *&UpperLimit) {
221 using namespace llvm::PatternMatch;
223 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
224 Value *LHS = nullptr, *RHS = nullptr;
// Bail unless `Check' is an icmp at all.
226 if (!match(Check, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
233 case ICmpInst::ICMP_SGT:
236 case ICmpInst::ICMP_SLT:
242 case ICmpInst::ICMP_UGT:
245 case ICmpInst::ICMP_ULT:
253 /// Split a condition into something semantically equivalent to (0 <= I <
254 /// Limit), both comparisons signed and Limit loop invariant on L and positive.
255 /// On success, return true and set Index to I and UpperLimit to Limit. Return
256 /// false on failure (we may still write to UpperLimit and Index on failure).
257 /// It does not try to interpret I as a loop index.
259 static bool SplitRangeCheckCondition(Loop *L, ScalarEvolution &SE,
260 Value *Condition, const SCEV *&Index,
261 Value *&UpperLimit) {
263 // TODO: currently this catches some silly cases like comparing "%idx slt 1".
264 // Our transformations are still correct, but less likely to be profitable in
265 // those cases. We have to come up with some heuristics that pick out the
266 // range checks that are more profitable to clone a loop for. This function
267 // in general can be made more robust.
269 using namespace llvm::PatternMatch;
273 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
275 // In these early checks we assume that the matched UpperLimit is positive.
276 // We'll verify that fact later, before returning true.
// Form 1: an explicit conjunction "(0 <= I) && (I < Limit)" -- identify
// which operand is the lower-bound check, then require the other to be the
// matching upper-bound check on the same index value.
278 if (match(Condition, m_And(m_Value(A), m_Value(B)))) {
279 Value *IndexV = nullptr;
280 Value *ExpectedUpperBoundCheck = nullptr;
282 if (IsLowerBoundCheck(A, IndexV))
283 ExpectedUpperBoundCheck = B;
284 else if (IsLowerBoundCheck(B, IndexV))
285 ExpectedUpperBoundCheck = A;
289 if (!IsUpperBoundCheck(ExpectedUpperBoundCheck, IndexV, UpperLimit))
292 Index = SE.getSCEV(IndexV);
294 if (isa<SCEVCouldNotCompute>(Index))
// Form 2: a single comparison; signed forms additionally require SCEV to
// prove the index is non-negative, unsigned forms imply it for free.
297 } else if (match(Condition, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
302 case ICmpInst::ICMP_SGT:
305 case ICmpInst::ICMP_SLT:
307 Index = SE.getSCEV(A);
308 if (isa<SCEVCouldNotCompute>(Index) || !SE.isKnownNonNegative(Index))
312 case ICmpInst::ICMP_UGT:
315 case ICmpInst::ICMP_ULT:
317 Index = SE.getSCEV(A);
318 if (isa<SCEVCouldNotCompute>(Index))
// Verify the assumption made above: UpperLimit must be provably
// non-negative ...
326 const SCEV *UpperLimitSCEV = SE.getSCEV(UpperLimit);
327 if (isa<SCEVCouldNotCompute>(UpperLimitSCEV) ||
328 !SE.isKnownNonNegative(UpperLimitSCEV))
// ... and invariant in L, else the split is not sound.
331 if (SE.getLoopDisposition(UpperLimitSCEV, L) !=
332 ScalarEvolution::LoopInvariant) {
333 DEBUG(dbgs() << " in function: " << L->getHeader()->getParent()->getName()
335 dbgs() << " UpperLimit is not loop invariant: "
336 << UpperLimit->getName() << "\n";);
// Factory: try to recognize `BI' as an inductive range check within loop `L'.
// On success, bump-allocates and returns a populated InductiveRangeCheck;
// NOTE(review): the early "return nullptr" paths are elided from this listing.
343 InductiveRangeCheck *
344 InductiveRangeCheck::create(InductiveRangeCheck::AllocatorTy &A, BranchInst *BI,
345 Loop *L, ScalarEvolution &SE) {
// The latch branch controls the loop itself; it is not a range check.
347 if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
350 Value *Length = nullptr;
351 const SCEV *IndexSCEV = nullptr;
// Decompose the condition into (0 <= Index < Length).
353 if (!SplitRangeCheckCondition(L, SE, BI->getCondition(), IndexSCEV, Length))
356 assert(IndexSCEV && Length && "contract with SplitRangeCheckCondition!");
// The index must be an affine add-recurrence over this very loop so that
// Offset (start) and Scale (step) are well defined.
358 const SCEVAddRecExpr *IndexAddRec = dyn_cast<SCEVAddRecExpr>(IndexSCEV);
360 IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();
365 InductiveRangeCheck *IRC = new (A.Allocate()) InductiveRangeCheck;
366 IRC->Length = Length;
367 IRC->Offset = IndexAddRec->getStart();
368 IRC->Scale = IndexAddRec->getStepRecurrence(SE);
// If `V' is an instruction that InstructionSimplify can fold, return the
// simplified value; otherwise return `V' unchanged (returns elided from this
// listing). Non-instruction values pass through untouched.
373 static Value *MaybeSimplify(Value *V) {
374 if (Instruction *I = dyn_cast<Instruction>(V))
375 if (Value *Simplified = SimplifyInstruction(I))
// Emit IR computing the signed minimum of X and Y as (X slt Y) ? X : Y,
// folding the select away when InstructionSimplify can.
380 static Value *ConstructSMinOf(Value *X, Value *Y, IRBuilder<> &B) {
381 return MaybeSimplify(B.CreateSelect(B.CreateICmpSLT(X, Y), X, Y));
// Emit IR computing the signed maximum of X and Y as (X sgt Y) ? X : Y,
// folding the select away when InstructionSimplify can.
384 static Value *ConstructSMaxOf(Value *X, Value *Y, IRBuilder<> &B) {
385 return MaybeSimplify(B.CreateSelect(B.CreateICmpSGT(X, Y), X, Y));
390 /// This class is used to constrain loops to run within a given iteration space.
391 /// The algorithm this class implements is given a Loop and a range [Begin,
392 /// End). The algorithm then tries to break out a "main loop" out of the loop
393 /// it is given in a way that the "main loop" runs with the induction variable
394 /// in a subset of [Begin, End). The algorithm emits appropriate pre and post
395 /// loops to run any remaining iterations. The pre loop runs any iterations in
396 /// which the induction variable is < Begin, and the post loop runs any
397 /// iterations in which the induction variable is >= End.
// NOTE(review): several member declarations (Tag, Header, Latch, LatchBr, CIV,
// CIVStart, CIVNext, F, Ctx, SE, OriginalLoop, SubRanges, run(), ...) are
// elided from this listing; the visible tokens are preserved verbatim.
399 class LoopConstrainer {
401 // Keeps track of the structure of a loop. This is similar to llvm::Loop,
402 // except that it is more lightweight and can track the state of a loop
403 // through changing and potentially invalid IR. This structure also
404 // formalizes the kinds of loops we can deal with -- ones that have a single
405 // latch that is also an exiting block *and* have a canonical induction
407 struct LoopStructure {
413 // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
414 // successor is `LatchExit', the exit block of the loop.
416 BasicBlock *LatchExit;
417 unsigned LatchBrExitIdx;
419 // The canonical induction variable. Its value is `CIVStart` on the 0th
420 // iteration and `CIVNext` for all iterations after that.
425 LoopStructure() : Tag(""), Header(nullptr), Latch(nullptr),
426 LatchBr(nullptr), LatchExit(nullptr),
427 LatchBrExitIdx(-1), CIV(nullptr),
428 CIVStart(nullptr), CIVNext(nullptr) { }
// Produce a copy of this structure with every IR reference translated
// through `Map' (used to derive a LoopStructure for a cloned loop).
430 template <typename M> LoopStructure map(M Map) const {
431 LoopStructure Result;
433 Result.Header = cast<BasicBlock>(Map(Header));
434 Result.Latch = cast<BasicBlock>(Map(Latch));
435 Result.LatchBr = cast<BranchInst>(Map(LatchBr));
436 Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
437 Result.LatchBrExitIdx = LatchBrExitIdx;
438 Result.CIV = cast<PHINode>(Map(CIV));
439 Result.CIVNext = Map(CIVNext);
440 Result.CIVStart = Map(CIVStart);
445 // The representation of a clone of the original loop we started out with.
448 std::vector<BasicBlock *> Blocks;
450 // `Map` maps values in the clonee into values in the cloned version
451 ValueToValueMapTy Map;
453 // An instance of `LoopStructure` for the cloned loop
454 LoopStructure Structure;
457 // Result of rewriting the range of a loop. See changeIterationSpaceEnd for
458 // more details on what these fields mean.
459 struct RewrittenRangeInfo {
460 BasicBlock *PseudoExit;
461 BasicBlock *ExitSelector;
462 std::vector<PHINode *> PHIValuesAtPseudoExit;
464 RewrittenRangeInfo() : PseudoExit(nullptr), ExitSelector(nullptr) { }
467 // Calculated subranges we restrict the iteration space of the main loop to.
468 // See the implementation of `calculateSubRanges' for more details on how
469 // these fields are computed. `ExitPreLoopAt' is `None' if we don't need a
470 // pre loop. `ExitMainLoopAt' is `None' if we don't need a post loop.
472 Optional<Value *> ExitPreLoopAt;
473 Optional<Value *> ExitMainLoopAt;
476 // A utility function that does a `replaceUsesOfWith' on the incoming block
477 // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
478 // incoming block list with `ReplaceBy'.
479 static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
480 BasicBlock *ReplaceBy);
482 // Try to "parse" `OriginalLoop' and populate the various out parameters.
483 // Returns true on success, false on failure.
485 bool recognizeLoop(LoopStructure &LoopStructureOut,
486 const SCEV *&LatchCountOut, BasicBlock *&PreHeaderOut,
487 const char *&FailureReasonOut) const;
489 // Compute a safe set of limits for the main loop to run in -- effectively the
490 // intersection of `Range' and the iteration space of the original loop.
491 // Return the header count (1 + the latch taken count) in `HeaderCount'.
492 // Return None if unable to compute the set of subranges.
494 Optional<SubRanges> calculateSubRanges(Value *&HeaderCount) const;
496 // Clone `OriginalLoop' and return the result in CLResult. The IR after
497 // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
498 // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
499 // but there is no such edge.
501 void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;
503 // Rewrite the iteration space of the loop denoted by (LS, Preheader). The
504 // iteration space of the rewritten loop ends at ExitLoopAt. The start of the
505 // iteration space is not changed. `ExitLoopAt' is assumed to be slt
506 // `OriginalHeaderCount'.
508 // If there are iterations left to execute, control is made to jump to
509 // `ContinuationBlock', otherwise they take the normal loop exit. The
510 // returned `RewrittenRangeInfo' object is populated as follows:
512 // .PseudoExit is a basic block that unconditionally branches to
513 // `ContinuationBlock'.
515 // .ExitSelector is a basic block that decides, on exit from the loop,
516 // whether to branch to the "true" exit or to `PseudoExit'.
518 // .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value
519 // for each PHINode in the loop header on taking the pseudo exit.
521 // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
522 // preheader because it is made to branch to the loop header only
526 changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
528 BasicBlock *ContinuationBlock) const;
530 // The loop denoted by `LS' has `OldPreheader' as its preheader. This
531 // function creates a new preheader for `LS' and returns it.
533 BasicBlock *createPreheader(const LoopConstrainer::LoopStructure &LS,
534 BasicBlock *OldPreheader, const char *Tag) const;
536 // `ContinuationBlockAndPreheader' was the continuation block for some call to
537 // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'.
538 // This function rewrites the PHI nodes in `LS.Header' to start with the
540 void rewriteIncomingValuesForPHIs(
541 LoopConstrainer::LoopStructure &LS,
542 BasicBlock *ContinuationBlockAndPreheader,
543 const LoopConstrainer::RewrittenRangeInfo &RRI) const;
545 // Even though we do not preserve any passes at this time, we at least need to
546 // keep the parent loop structure consistent. The `LPPassManager' seems to
547 // verify this after running a loop pass. This function adds the list of
548 // blocks denoted by the iterator range [BlocksBegin, BlocksEnd) to this loop's
549 // parent loop if required.
550 template<typename IteratorTy>
551 void addToParentLoopIfNeeded(IteratorTy BlocksBegin, IteratorTy BlocksEnd);
553 // Some global state.
558 // Information about the original loop we started out with.
560 LoopInfo &OriginalLoopInfo;
561 const SCEV *LatchTakenCount;
562 BasicBlock *OriginalPreheader;
563 Value *OriginalHeaderCount;
565 // The preheader of the main loop. This may or may not be different from
566 // `OriginalPreheader'.
567 BasicBlock *MainLoopPreheader;
569 // The range we need to run the main loop in.
570 InductiveRangeCheck::Range Range;
572 // The structure of the main loop (see comment at the beginning of this class
574 LoopStructure MainLoopStructure;
577 LoopConstrainer(Loop &L, LoopInfo &LI, ScalarEvolution &SE,
578 InductiveRangeCheck::Range R)
579 : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()), SE(SE),
580 OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr),
581 OriginalPreheader(nullptr), OriginalHeaderCount(nullptr),
582 MainLoopPreheader(nullptr), Range(R) { }
584 // Entry point for the algorithm. Returns true on success.
584 // Entry point for the algorithm. Returns true on success.
// Replace every occurrence of `Block' in `PN's incoming-block list with
// `ReplaceBy'; the associated incoming values are left untouched.
590 void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
591 BasicBlock *ReplaceBy) {
592 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
593 if (PN->getIncomingBlock(i) == Block)
594 PN->setIncomingBlock(i, ReplaceBy);
// Verify that `OriginalLoop' has the exact shape LoopConstrainer can handle
// (single exiting latch, canonical IV, preheader, SLT latch condition whose
// limit is latch-count + 1) and fill in the out parameters. On failure,
// `FailureReason' names the first check that did not hold.
// NOTE(review): the early "return false" statements after each FailureReason
// assignment are elided from this listing.
597 bool LoopConstrainer::recognizeLoop(LoopStructure &LoopStructureOut,
598 const SCEV *&LatchCountOut,
599 BasicBlock *&PreheaderOut,
600 const char *&FailureReason) const {
601 using namespace llvm::PatternMatch;
603 assert(OriginalLoop.isLoopSimplifyForm() &&
604 "should follow from addRequired<>");
606 BasicBlock *Latch = OriginalLoop.getLoopLatch();
607 if (!OriginalLoop.isLoopExiting(Latch)) {
608 FailureReason = "no loop latch";
612 PHINode *CIV = OriginalLoop.getCanonicalInductionVariable();
614 FailureReason = "no CIV";
618 BasicBlock *Header = OriginalLoop.getHeader();
619 BasicBlock *Preheader = OriginalLoop.getLoopPreheader();
621 FailureReason = "no preheader";
// CIV's two incoming values: next-iteration value from the latch, initial
// value from the preheader.
625 Value *CIVNext = CIV->getIncomingValueForBlock(Latch);
626 Value *CIVStart = CIV->getIncomingValueForBlock(Preheader);
628 const SCEV *LatchCount = SE.getExitCount(&OriginalLoop, Latch);
629 if (isa<SCEVCouldNotCompute>(LatchCount)) {
630 FailureReason = "could not compute latch count";
634 // While SCEV does most of the analysis for us, we still have to
635 // modify the latch; and currently we can only deal with certain
636 // kinds of latches. This can be made more sophisticated as needed.
638 BranchInst *LatchBr = dyn_cast<BranchInst>(&*Latch->rbegin());
640 if (!LatchBr || LatchBr->isUnconditional()) {
641 FailureReason = "latch terminator not conditional branch";
645 // Currently we only support a latch condition of the form:
647 // %condition = icmp slt %civNext, %limit
648 // br i1 %condition, label %header, label %exit
650 if (LatchBr->getSuccessor(0) != Header) {
651 FailureReason = "unknown latch form (header not first successor)";
655 Value *CIVComparedTo = nullptr;
656 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
657 if (!(match(LatchBr->getCondition(),
658 m_ICmp(Pred, m_Specific(CIVNext), m_Value(CIVComparedTo))) &&
659 Pred == ICmpInst::ICMP_SLT)) {
660 FailureReason = "unknown latch form (not slt)";
664 // IndVarSimplify will sometimes leave behind (in SCEV's cache) backedge-taken
665 // counts that are narrower than the canonical induction variable. These
666 // values are still accurate, and we could probably use them after sign/zero
667 // extension; but for now we just bail out of the transformation to keep
669 const SCEV *CIVComparedToSCEV = SE.getSCEV(CIVComparedTo);
670 if (isa<SCEVCouldNotCompute>(CIVComparedToSCEV) ||
671 CIVComparedToSCEV->getType() != LatchCount->getType()) {
672 FailureReason = "could not relate CIV to latch expression";
// The latch limit must be exactly latch-taken-count + 1, i.e. the header
// executes limit times.
676 const SCEV *ShouldBeOne = SE.getMinusSCEV(CIVComparedToSCEV, LatchCount);
677 const SCEVConstant *SCEVOne = dyn_cast<SCEVConstant>(ShouldBeOne);
678 if (!SCEVOne || SCEVOne->getValue()->getValue() != 1) {
679 FailureReason = "unexpected header count in latch";
// Given the checks above, the exit edge is successor 1 of the latch branch.
683 unsigned LatchBrExitIdx = 1;
684 BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);
686 assert(SE.getLoopDisposition(LatchCount, &OriginalLoop) ==
687 ScalarEvolution::LoopInvariant &&
688 "loop variant exit count doesn't make sense!");
690 assert(!OriginalLoop.contains(LatchExit) && "expected an exit block!");
// All checks passed -- publish the recognized structure.
692 LoopStructureOut.Tag = "main";
693 LoopStructureOut.Header = Header;
694 LoopStructureOut.Latch = Latch;
695 LoopStructureOut.LatchBr = LatchBr;
696 LoopStructureOut.LatchExit = LatchExit;
697 LoopStructureOut.LatchBrExitIdx = LatchBrExitIdx;
698 LoopStructureOut.CIV = CIV;
699 LoopStructureOut.CIVNext = CIVNext;
700 LoopStructureOut.CIVStart = CIVStart;
702 LatchCountOut = LatchCount;
703 PreheaderOut = Preheader;
704 FailureReason = nullptr;
// Intersect `Range' with the original iteration space [0, header-count).
// Materializes the header count (latch taken count + 1) into the preheader,
// and only emits an smin-based exit limit for the pre/post loop when SCEV
// cannot prove the corresponding loop is unnecessary.
// NOTE(review): the "return None" type-mismatch path and the LatchCountV
// declaration line are elided from this listing.
709 Optional<LoopConstrainer::SubRanges>
710 LoopConstrainer::calculateSubRanges(Value *&HeaderCountOut) const {
711 IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());
713 assert(Range.first->getType() == Range.second->getType() &&
// Bail if the range's type differs from the latch count's type.
715 if (Range.first->getType() != Ty)
718 SCEVExpander Expander(SE, "irce");
719 Instruction *InsertPt = OriginalPreheader->getTerminator();
722 MaybeSimplify(Expander.expandCodeFor(LatchTakenCount, Ty, InsertPt));
724 IRBuilder<> B(InsertPt);
726 LoopConstrainer::SubRanges Result;
728 // I think we can be more aggressive here and make this nuw / nsw if the
729 // addition that feeds into the icmp for the latch's terminating branch is nuw
730 // / nsw. In any case, a wrapping 2's complement addition is safe.
731 ConstantInt *One = ConstantInt::get(Ty, 1);
732 HeaderCountOut = MaybeSimplify(B.CreateAdd(LatchCountV, One, "header.count"));
734 const SCEV *RangeBegin = SE.getSCEV(Range.first);
735 const SCEV *RangeEnd = SE.getSCEV(Range.second);
736 const SCEV *HeaderCountSCEV = SE.getSCEV(HeaderCountOut);
737 const SCEV *Zero = SE.getConstant(Ty, 0);
739 // In some cases we can prove that we don't need a pre or post loop
741 bool ProvablyNoPreloop =
742 SE.isKnownPredicate(ICmpInst::ICMP_SLE, RangeBegin, Zero);
743 if (!ProvablyNoPreloop)
744 Result.ExitPreLoopAt = ConstructSMinOf(HeaderCountOut, Range.first, B);
746 bool ProvablyNoPostLoop =
747 SE.isKnownPredicate(ICmpInst::ICMP_SLE, HeaderCountSCEV, RangeEnd);
748 if (!ProvablyNoPostLoop)
749 Result.ExitMainLoopAt = ConstructSMinOf(HeaderCountOut, Range.second, B);
// Clone every block of `OriginalLoop' into `Result' (suffixing names with
// ".Tag"), remap all intra-loop references through the clone map, and extend
// the PHI nodes of the original loop's exit blocks with incoming values from
// the cloned blocks.
754 void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
755 const char *Tag) const {
// Pass 1: clone the blocks and record the original->clone mapping.
756 for (BasicBlock *BB : OriginalLoop.getBlocks()) {
757 BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
758 Result.Blocks.push_back(Clone);
759 Result.Map[BB] = Clone;
// Look up a value's clone; values without an entry map to themselves
// (the "return V" fallback line is elided from this listing).
762 auto GetClonedValue = [&Result](Value *V) {
763 assert(V && "null values not in domain!");
764 auto It = Result.Map.find(V);
765 if (It == Result.Map.end())
767 return static_cast<Value *>(It->second);
770 Result.Structure = MainLoopStructure.map(GetClonedValue);
771 Result.Structure.Tag = Tag;
// Pass 2: rewrite operands inside the clones and patch exit-block PHIs.
773 for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
774 BasicBlock *ClonedBB = Result.Blocks[i];
775 BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];
777 assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");
779 for (Instruction &I : *ClonedBB)
780 RemapInstruction(&I, Result.Map,
781 RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);
783 // Exit blocks will now have one more predecessor and their PHI nodes need
784 // to be edited to reflect that. No phi nodes need to be introduced because
785 // the loop is in LCSSA.
787 for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB);
788 SBBI != SBBE; ++SBBI) {
790 if (OriginalLoop.contains(*SBBI))
791 continue; // not an exit block
793 for (Instruction &I : **SBBI) {
794 if (!isa<PHINode>(&I))
797 PHINode *PN = cast<PHINode>(&I);
798 Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
799 PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
// Cap the iteration space of the loop (LS, Preheader) at `ExitLoopAt': the
// preheader gains an entry guard, the latch exits through a new
// ".exit.selector" block that decides between the real exit and a new
// ".pseudo.exit" block feeding `ContinuationBlock'. See the diagrams below.
805 LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
806 const LoopStructure &LS, BasicBlock *Preheader, Value *ExitLoopAt,
807 BasicBlock *ContinuationBlock) const {
809 // We start with a loop with a single latch:
811 // +--------------------+
815 // +--------+-----------+
816 // | ----------------\
818 // +--------v----v------+ |
822 // +--------------------+ |
826 // +--------------------+ |
828 // | latch >----------/
830 // +-------v------------+
833 // | +--------------------+
835 // +---> original exit |
837 // +--------------------+
839 // We change the control flow to look like
842 // +--------------------+
844 // | preheader >-------------------------+
846 // +--------v-----------+ |
847 // | /-------------+ |
849 // +--------v--v--------+ | |
851 // | header | | +--------+ |
853 // +--------------------+ | | +-----v-----v-----------+
855 // | | | .pseudo.exit |
857 // | | +-----------v-----------+
860 // | | +--------v-------------+
861 // +--------------------+ | | | |
862 // | | | | | ContinuationBlock |
863 // | latch >------+ | | |
864 // | | | +----------------------+
865 // +---------v----------+ |
868 // | +---------------^-----+
870 // +-----> .exit.selector |
872 // +----------v----------+
874 // +--------------------+ |
876 // | original exit <----+
878 // +--------------------+
881 RewrittenRangeInfo RRI;
// Place the two new blocks right after the latch.
883 auto BBInsertLocation = std::next(Function::iterator(LS.Latch));
884 RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
885 &F, BBInsertLocation);
886 RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
889 BranchInst *PreheaderJump = cast<BranchInst>(&*Preheader->rbegin());
891 IRBuilder<> B(PreheaderJump);
893 // EnterLoopCond - is it okay to start executing this `LS'?
894 Value *EnterLoopCond = B.CreateICmpSLT(LS.CIVStart, ExitLoopAt);
895 B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
896 PreheaderJump->eraseFromParent();
898 assert(LS.LatchBrExitIdx == 1 && "generalize this as needed!");
900 B.SetInsertPoint(LS.LatchBr);
902 // ContinueCond - is it okay to execute the next iteration in `LS'?
903 Value *ContinueCond = B.CreateICmpSLT(LS.CIVNext, ExitLoopAt);
// Retarget the latch's exit edge at the new exit selector.
905 LS.LatchBr->setCondition(ContinueCond);
906 assert(LS.LatchBr->getSuccessor(LS.LatchBrExitIdx) == LS.LatchExit &&
908 LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
910 B.SetInsertPoint(RRI.ExitSelector);
912 // IterationsLeft - are there any more iterations left, given the original
913 // upper bound on the induction variable? If not, we branch to the "real"
915 Value *IterationsLeft = B.CreateICmpSLT(LS.CIVNext, OriginalHeaderCount);
916 B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);
918 BranchInst *BranchToContinuation =
919 BranchInst::Create(ContinuationBlock, RRI.PseudoExit);
921 // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
922 // each of the PHI nodes in the loop header. This feeds into the initial
923 // value of the same PHI nodes if/when we continue execution.
924 for (Instruction &I : *LS.Header) {
925 if (!isa<PHINode>(&I))
928 PHINode *PN = cast<PHINode>(&I);
930 PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
931 BranchToContinuation);
// On the preheader path (loop never entered) the value is the initial one;
// on the latch path it is the value of the last executed iteration.
933 NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
934 NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
936 RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
939 // The latch exit now has a branch from `RRI.ExitSelector' instead of
940 // `LS.Latch'. The PHI nodes need to be updated to reflect that.
941 for (Instruction &I : *LS.LatchExit) {
942 if (PHINode *PN = dyn_cast<PHINode>(&I))
943 replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
// For each header PHI of `LS', replace the value incoming from
// `ContinuationBlock' with the matching PHI computed in the pseudo exit
// (RRI.PHIValuesAtPseudoExit, in header-PHI order), then refresh the cached
// CIVStart to the value now flowing in from the continuation block.
951 void LoopConstrainer::rewriteIncomingValuesForPHIs(
952 LoopConstrainer::LoopStructure &LS, BasicBlock *ContinuationBlock,
953 const LoopConstrainer::RewrittenRangeInfo &RRI) const {
955 unsigned PHIIndex = 0;
956 for (Instruction &I : *LS.Header) {
957 if (!isa<PHINode>(&I))
960 PHINode *PN = cast<PHINode>(&I);
962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
963 if (PN->getIncomingBlock(i) == ContinuationBlock)
964 PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
967 LS.CIVStart = LS.CIV->getIncomingValueForBlock(ContinuationBlock);
// Create a fresh block named `Tag' that unconditionally branches to
// `LS.Header', and retarget the header PHIs' incoming edges from
// `OldPreheader' to the new block (the return statement is elided from
// this listing).
971 LoopConstrainer::createPreheader(const LoopConstrainer::LoopStructure &LS,
972 BasicBlock *OldPreheader,
973 const char *Tag) const {
975 BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
976 BranchInst::Create(LS.Header, Preheader);
978 for (Instruction &I : *LS.Header) {
979 if (!isa<PHINode>(&I))
982 PHINode *PN = cast<PHINode>(&I);
983 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
984 replacePHIBlock(PN, OldPreheader, Preheader);
// Register the blocks in [Begin, End) with the original loop's parent loop
// (if any) so LoopInfo stays consistent after the transformation. The
// no-parent early return is elided from this listing.
990 template<typename IteratorTy>
991 void LoopConstrainer::addToParentLoopIfNeeded(IteratorTy Begin,
993 Loop *ParentLoop = OriginalLoop.getParentLoop();
997 for (; Begin != End; Begin++)
998 ParentLoop->addBasicBlockToLoop(*Begin, OriginalLoopInfo);
// Top-level driver: recognize the loop, compute the constrained subranges,
// clone pre/post loops as needed, splice them around the main loop, and keep
// LoopInfo consistent. Returns false (early-return lines elided from this
// listing) when the loop cannot be handled.
1001 bool LoopConstrainer::run() {
1002 BasicBlock *Preheader = nullptr;
1003 const char *CouldNotProceedBecause = nullptr;
1004 if (!recognizeLoop(MainLoopStructure, LatchTakenCount, Preheader,
1005 CouldNotProceedBecause)) {
1006 DEBUG(dbgs() << "irce: could not recognize loop, " << CouldNotProceedBecause
1011 OriginalPreheader = Preheader;
1012 MainLoopPreheader = Preheader;
1014 Optional<SubRanges> MaybeSR = calculateSubRanges(OriginalHeaderCount);
1015 if (!MaybeSR.hasValue()) {
1016 DEBUG(dbgs() << "irce: could not compute subranges\n");
1019 SubRanges SR = MaybeSR.getValue();
1021 // It would have been better to make `PreLoop' and `PostLoop'
1022 // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
1024 ClonedLoop PreLoop, PostLoop;
1025 bool NeedsPreLoop = SR.ExitPreLoopAt.hasValue();
1026 bool NeedsPostLoop = SR.ExitMainLoopAt.hasValue();
1028 // We clone these ahead of time so that we don't have to deal with changing
1029 // and temporarily invalid IR as we transform the loops.
1031 cloneLoop(PreLoop, "preloop");
1033 cloneLoop(PostLoop, "postloop");
1035 RewrittenRangeInfo PreLoopRRI;
// If a pre loop is needed, route entry through it, then cap it at
// ExitPreLoopAt so it falls through into the main loop's new preheader.
1038 Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
1039 PreLoop.Structure.Header);
1042 createPreheader(MainLoopStructure, Preheader, "mainloop");
1044 changeIterationSpaceEnd(PreLoop.Structure, Preheader,
1045 SR.ExitPreLoopAt.getValue(), MainLoopPreheader);
1046 rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
1050 BasicBlock *PostLoopPreheader = nullptr;
1051 RewrittenRangeInfo PostLoopRRI;
// Symmetrically, cap the main loop at ExitMainLoopAt so it falls through
// into the post loop.
1053 if (NeedsPostLoop) {
1055 createPreheader(PostLoop.Structure, Preheader, "postloop");
1056 PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
1057 SR.ExitMainLoopAt.getValue(),
1059 rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
// Collect every block created above so it can be added to the parent loop.
1063 SmallVector<BasicBlock *, 6> NewBlocks;
1064 NewBlocks.push_back(PostLoopPreheader);
1065 NewBlocks.push_back(PreLoopRRI.PseudoExit);
1066 NewBlocks.push_back(PreLoopRRI.ExitSelector);
1067 NewBlocks.push_back(PostLoopRRI.PseudoExit);
1068 NewBlocks.push_back(PostLoopRRI.ExitSelector);
1069 if (MainLoopPreheader != Preheader)
1070 NewBlocks.push_back(MainLoopPreheader);
1072 // Some of the above may be nullptr, filter them out before passing to
1073 // addToParentLoopIfNeeded.
1074 auto NewBlocksEnd = std::remove(NewBlocks.begin(), NewBlocks.end(), nullptr);
1076 typedef SmallVector<BasicBlock *, 6>::iterator SmallVectItTy;
1077 typedef std::vector<BasicBlock *>::iterator StdVectItTy;
1079 addToParentLoopIfNeeded<SmallVectItTy>(NewBlocks.begin(), NewBlocksEnd);
1080 addToParentLoopIfNeeded<StdVectItTy>(PreLoop.Blocks.begin(),
1081 PreLoop.Blocks.end());
1082 addToParentLoopIfNeeded<StdVectItTy>(PostLoop.Blocks.begin(),
1083 PostLoop.Blocks.end());
1088 /// Computes and returns a range of values for the induction variable in which
1089 /// the range check can be safely elided. If it cannot compute such a range,
// (continuation elided; presumably "returns None" — confirm in full source)
1091 Optional<InductiveRangeCheck::Range>
1092 InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
1093 IRBuilder<> &B) const {
1095 // Currently we support inequalities of the form:
1097 // 0 <= Offset + 1 * CIV < L given L >= 0
1099 // The inequality is satisfied by -Offset <= CIV < (L - Offset) [^1]. All
1100 // additions and subtractions are twos-complement wrapping and comparisons are
1105 // If there exists CIV such that -Offset <= CIV < (L - Offset) then it
1106 // follows that -Offset <= (-Offset + L) [== Eq. 1]. Since L >= 0, if
1107 // (-Offset + L) sign-overflows then (-Offset + L) < (-Offset). Hence by
1108 // [Eq. 1], (-Offset + L) could not have overflown.
1110 // This means CIV = t + (-Offset) for t in [0, L). Hence (CIV + Offset) =
1111 // t. Hence 0 <= (CIV + Offset) < L
1113 // [^1]: Note that the solution does _not_ apply if L < 0; consider values
1114 // Offset = 127, CIV = 126 and L = -2 in an i8 world.
// Only Scale == 1 is handled; anything else defeats the reasoning above.
1116 const SCEVConstant *ScaleC = dyn_cast<SCEVConstant>(getScale());
1117 if (!(ScaleC && ScaleC->getValue()->getValue() == 1)) {
1118 DEBUG(dbgs() << "irce: could not compute safe iteration space for:\n";
// Materialize Offset as IR at the builder's insertion point, then emit
// the range endpoints [-Offset, Length - Offset).
1123 Value *OffsetV = SCEVExpander(SE, "safe.itr.space").expandCodeFor(
1124 getOffset(), getOffset()->getType(), B.GetInsertPoint());
1125 OffsetV = MaybeSimplify(OffsetV);
1127 Value *Begin = MaybeSimplify(B.CreateNeg(OffsetV));
1128 Value *End = MaybeSimplify(B.CreateSub(getLength(), OffsetV));
1130 return std::make_pair(Begin, End);
/// Intersects two safe iteration ranges by taking the signed maximum of the
/// lower bounds and the signed minimum of the upper bounds.  R1 acts as a
/// running accumulator and may be empty; the intersection fails when the two
/// ranges are expressed in different (integer) types.
static Optional<InductiveRangeCheck::Range>
IntersectRange(const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2, IRBuilder<> &B) {
  assert(R2.first->getType() == R2.second->getType() && "ill-typed range!");

  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.first->getType() != R2.first->getType())

  // Tighten the bounds from both sides using signed max/min.
  Value *NewMin = ConstructSMaxOf(R1Value.first, R2.first, B);
  Value *NewMax = ConstructSMinOf(R1Value.second, R2.second, B);
  return std::make_pair(NewMin, NewMax);
bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  // Versioning a loop creates up to three copies of its body; refuse to do
  // that for loops that are already large.
  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);

  // All range computations are materialized in the preheader, so one must
  // exist.
  BasicBlock *Preheader = L->getLoopPreheader();
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");

  LLVMContext &Context = Preheader->getContext();
  InductiveRangeCheck::AllocatorTy IRCAlloc;
  SmallVector<InductiveRangeCheck *, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolution>();

  // Walk every block in the loop and collect the conditional branches that
  // InductiveRangeCheck::create recognizes as inductive range checks.
  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      if (InductiveRangeCheck *IRC =
          InductiveRangeCheck::create(IRCAlloc, TBI, L, SE))
        RangeChecks.push_back(IRC);

  if (RangeChecks.empty())

  DEBUG(dbgs() << "irce: looking at loop "; L->print(dbgs());
        dbgs() << "irce: loop has " << RangeChecks.size()
               << " inductive range checks: \n";
        for (InductiveRangeCheck *IRC : RangeChecks)

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck *, 4> RangeChecksToEliminate;

  // Greedily intersect the safe iteration spaces of the individual checks;
  // a check is only queued for elimination if its range survives in the
  // running intersection.
  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck *IRC : RangeChecks) {
    auto Result = IRC->computeSafeIterationSpace(SE, B);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
        IntersectRange(SafeIterRange, Result.getValue(), B);
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();

  if (!SafeIterRange.hasValue())

  // Version the loop so that the main copy runs exactly the computed safe
  // iteration range.
  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), SE,
                     SafeIterRange.getValue());
  bool Changed = LC.run();

  auto PrintConstrainedLoopInfo = [L]() {
    dbgs() << "irce: in function ";
    dbgs() << L->getHeader()->getParent()->getName() << ": ";
    dbgs() << "constrained ";

  DEBUG(PrintConstrainedLoopInfo());

  if (PrintChangedLoops)
    PrintConstrainedLoopInfo();

  // Optimize away the now-redundant range checks.
  // Inside the constrained main loop each eliminated check always resolves to
  // its passing direction, so fold the branch condition to that constant.
  for (InductiveRangeCheck *IRC : RangeChecksToEliminate) {
    ConstantInt *FoldedRangeCheck = IRC->getPassingDirection()
                                    ? ConstantInt::getTrue(Context)
                                    : ConstantInt::getFalse(Context);
    IRC->getBranch()->setCondition(FoldedRangeCheck);
/// Factory hook for the legacy pass manager: creates a fresh instance of the
/// inductive-range-check-elimination loop pass.
Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;