//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges.  It does that in a way such that the loop running in
// the middle loop provably does not need range checks.  As an example, it will
// convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
79 static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
82 static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
85 static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
86 cl::Hidden, cl::init(10));
88 #define DEBUG_TYPE "irce"
92 /// An inductive range check is conditional branch in a loop with
94 /// 1. a very cold successor (i.e. the branch jumps to that successor very
99 /// 2. a condition that is provably true for some range of values taken by the
100 /// containing loop's induction variable.
102 /// Currently all inductive range checks are branches conditional on an
103 /// expression of the form
105 /// 0 <= (Offset + Scale * I) < Length
107 /// where `I' is the canonical induction variable of a loop to which Offset and
108 /// Scale are loop invariant, and Length is >= 0. Currently the 'false' branch
109 /// is considered cold, looking at profiling data to verify that is a TODO.
111 class InductiveRangeCheck {
117 InductiveRangeCheck() :
118 Offset(nullptr), Scale(nullptr), Length(nullptr), Branch(nullptr) { }
121 const SCEV *getOffset() const { return Offset; }
122 const SCEV *getScale() const { return Scale; }
123 Value *getLength() const { return Length; }
125 void print(raw_ostream &OS) const {
126 OS << "InductiveRangeCheck:\n";
134 getBranch()->print(OS);
138 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
144 BranchInst *getBranch() const { return Branch; }
146 /// Represents an signed integer range [Range.getBegin(), Range.getEnd()). If
147 /// R.getEnd() sle R.getBegin(), then R denotes the empty range.
154 Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
155 assert(Begin->getType() == End->getType() && "ill-typed range!");
158 Type *getType() const { return Begin->getType(); }
159 const SCEV *getBegin() const { return Begin; }
160 const SCEV *getEnd() const { return End; }
163 typedef SpecificBumpPtrAllocator<InductiveRangeCheck> AllocatorTy;
165 /// This is the value the condition of the branch needs to evaluate to for the
166 /// branch to take the hot successor (see (1) above).
167 bool getPassingDirection() { return true; }
169 /// Computes a range for the induction variable (IndVar) in which the range
170 /// check is redundant and can be constant-folded away. The induction
171 /// variable is not required to be the canonical {0,+,1} induction variable.
172 Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
173 const SCEVAddRecExpr *IndVar,
174 IRBuilder<> &B) const;
176 /// Create an inductive range check out of BI if possible, else return
178 static InductiveRangeCheck *create(AllocatorTy &Alloc, BranchInst *BI,
179 Loop *L, ScalarEvolution &SE,
180 BranchProbabilityInfo &BPI);
183 class InductiveRangeCheckElimination : public LoopPass {
184 InductiveRangeCheck::AllocatorTy Allocator;
188 InductiveRangeCheckElimination() : LoopPass(ID) {
189 initializeInductiveRangeCheckEliminationPass(
190 *PassRegistry::getPassRegistry());
193 void getAnalysisUsage(AnalysisUsage &AU) const override {
194 AU.addRequired<LoopInfoWrapperPass>();
195 AU.addRequiredID(LoopSimplifyID);
196 AU.addRequiredID(LCSSAID);
197 AU.addRequired<ScalarEvolution>();
198 AU.addRequired<BranchProbabilityInfo>();
201 bool runOnLoop(Loop *L, LPPassManager &LPM) override;
204 char InductiveRangeCheckElimination::ID = 0;
207 INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
208 "Inductive range check elimination", false, false)
210 static bool IsLowerBoundCheck(Value *Check, Value *&IndexV) {
211 using namespace llvm::PatternMatch;
213 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
214 Value *LHS = nullptr, *RHS = nullptr;
216 if (!match(Check, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
223 case ICmpInst::ICMP_SLE:
226 case ICmpInst::ICMP_SGE:
227 if (!match(RHS, m_ConstantInt<0>()))
232 case ICmpInst::ICMP_SLT:
235 case ICmpInst::ICMP_SGT:
236 if (!match(RHS, m_ConstantInt<-1>()))
243 static bool IsUpperBoundCheck(Value *Check, Value *Index, Value *&UpperLimit) {
244 using namespace llvm::PatternMatch;
246 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
247 Value *LHS = nullptr, *RHS = nullptr;
249 if (!match(Check, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
256 case ICmpInst::ICMP_SGT:
259 case ICmpInst::ICMP_SLT:
265 case ICmpInst::ICMP_UGT:
268 case ICmpInst::ICMP_ULT:
276 /// Split a condition into something semantically equivalent to (0 <= I <
277 /// Limit), both comparisons signed and Len loop invariant on L and positive.
278 /// On success, return true and set Index to I and UpperLimit to Limit. Return
279 /// false on failure (we may still write to UpperLimit and Index on failure).
280 /// It does not try to interpret I as a loop index.
282 static bool SplitRangeCheckCondition(Loop *L, ScalarEvolution &SE,
283 Value *Condition, const SCEV *&Index,
284 Value *&UpperLimit) {
286 // TODO: currently this catches some silly cases like comparing "%idx slt 1".
287 // Our transformations are still correct, but less likely to be profitable in
288 // those cases. We have to come up with some heuristics that pick out the
289 // range checks that are more profitable to clone a loop for. This function
290 // in general can be made more robust.
292 using namespace llvm::PatternMatch;
296 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
298 // In these early checks we assume that the matched UpperLimit is positive.
299 // We'll verify that fact later, before returning true.
301 if (match(Condition, m_And(m_Value(A), m_Value(B)))) {
302 Value *IndexV = nullptr;
303 Value *ExpectedUpperBoundCheck = nullptr;
305 if (IsLowerBoundCheck(A, IndexV))
306 ExpectedUpperBoundCheck = B;
307 else if (IsLowerBoundCheck(B, IndexV))
308 ExpectedUpperBoundCheck = A;
312 if (!IsUpperBoundCheck(ExpectedUpperBoundCheck, IndexV, UpperLimit))
315 Index = SE.getSCEV(IndexV);
317 if (isa<SCEVCouldNotCompute>(Index))
320 } else if (match(Condition, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
325 case ICmpInst::ICMP_SGT:
328 case ICmpInst::ICMP_SLT:
330 Index = SE.getSCEV(A);
331 if (isa<SCEVCouldNotCompute>(Index) || !SE.isKnownNonNegative(Index))
335 case ICmpInst::ICMP_UGT:
338 case ICmpInst::ICMP_ULT:
340 Index = SE.getSCEV(A);
341 if (isa<SCEVCouldNotCompute>(Index))
349 const SCEV *UpperLimitSCEV = SE.getSCEV(UpperLimit);
350 if (isa<SCEVCouldNotCompute>(UpperLimitSCEV) ||
351 !SE.isKnownNonNegative(UpperLimitSCEV))
354 if (SE.getLoopDisposition(UpperLimitSCEV, L) !=
355 ScalarEvolution::LoopInvariant) {
356 DEBUG(dbgs() << " in function: " << L->getHeader()->getParent()->getName()
358 dbgs() << " UpperLimit is not loop invariant: "
359 << UpperLimit->getName() << "\n";);
367 InductiveRangeCheck *
368 InductiveRangeCheck::create(InductiveRangeCheck::AllocatorTy &A, BranchInst *BI,
369 Loop *L, ScalarEvolution &SE,
370 BranchProbabilityInfo &BPI) {
372 if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
375 BranchProbability LikelyTaken(15, 16);
377 if (BPI.getEdgeProbability(BI->getParent(), (unsigned) 0) < LikelyTaken)
380 Value *Length = nullptr;
381 const SCEV *IndexSCEV = nullptr;
383 if (!SplitRangeCheckCondition(L, SE, BI->getCondition(), IndexSCEV, Length))
386 assert(IndexSCEV && Length && "contract with SplitRangeCheckCondition!");
388 const SCEVAddRecExpr *IndexAddRec = dyn_cast<SCEVAddRecExpr>(IndexSCEV);
390 IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();
395 InductiveRangeCheck *IRC = new (A.Allocate()) InductiveRangeCheck;
396 IRC->Length = Length;
397 IRC->Offset = IndexAddRec->getStart();
398 IRC->Scale = IndexAddRec->getStepRecurrence(SE);
405 // Keeps track of the structure of a loop. This is similar to llvm::Loop,
406 // except that it is more lightweight and can track the state of a loop through
407 // changing and potentially invalid IR. This structure also formalizes the
408 // kinds of loops we can deal with -- ones that have a single latch that is also
409 // an exiting block *and* have a canonical induction variable.
410 struct LoopStructure {
416 // `Latch's terminator instruction is `LatchBr', and it's `LatchBrExitIdx'th
417 // successor is `LatchExit', the exit block of the loop.
419 BasicBlock *LatchExit;
420 unsigned LatchBrExitIdx;
425 bool IndVarIncreasing;
428 : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
429 LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
430 IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}
432 template <typename M> LoopStructure map(M Map) const {
433 LoopStructure Result;
435 Result.Header = cast<BasicBlock>(Map(Header));
436 Result.Latch = cast<BasicBlock>(Map(Latch));
437 Result.LatchBr = cast<BranchInst>(Map(LatchBr));
438 Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
439 Result.LatchBrExitIdx = LatchBrExitIdx;
440 Result.IndVarNext = Map(IndVarNext);
441 Result.IndVarStart = Map(IndVarStart);
442 Result.LoopExitAt = Map(LoopExitAt);
443 Result.IndVarIncreasing = IndVarIncreasing;
447 static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
448 BranchProbabilityInfo &BPI,
453 /// This class is used to constrain loops to run within a given iteration space.
454 /// The algorithm this class implements is given a Loop and a range [Begin,
455 /// End). The algorithm then tries to break out a "main loop" out of the loop
456 /// it is given in a way that the "main loop" runs with the induction variable
457 /// in a subset of [Begin, End). The algorithm emits appropriate pre and post
458 /// loops to run any remaining iterations. The pre loop runs any iterations in
459 /// which the induction variable is < Begin, and the post loop runs any
460 /// iterations in which the induction variable is >= End.
462 class LoopConstrainer {
463 // The representation of a clone of the original loop we started out with.
466 std::vector<BasicBlock *> Blocks;
468 // `Map` maps values in the clonee into values in the cloned version
469 ValueToValueMapTy Map;
471 // An instance of `LoopStructure` for the cloned loop
472 LoopStructure Structure;
475 // Result of rewriting the range of a loop. See changeIterationSpaceEnd for
476 // more details on what these fields mean.
477 struct RewrittenRangeInfo {
478 BasicBlock *PseudoExit;
479 BasicBlock *ExitSelector;
480 std::vector<PHINode *> PHIValuesAtPseudoExit;
484 : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
487 // Calculated subranges we restrict the iteration space of the main loop to.
488 // See the implementation of `calculateSubRanges' for more details on how
489 // these fields are computed. `LowLimit` is None if there is no restriction
490 // on low end of the restricted iteration space of the main loop. `HighLimit`
491 // is None if there is no restriction on high end of the restricted iteration
492 // space of the main loop.
495 Optional<const SCEV *> LowLimit;
496 Optional<const SCEV *> HighLimit;
499 // A utility function that does a `replaceUsesOfWith' on the incoming block
500 // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
501 // incoming block list with `ReplaceBy'.
502 static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
503 BasicBlock *ReplaceBy);
505 // Compute a safe set of limits for the main loop to run in -- effectively the
506 // intersection of `Range' and the iteration space of the original loop.
507 // Return None if unable to compute the set of subranges.
509 Optional<SubRanges> calculateSubRanges() const;
511 // Clone `OriginalLoop' and return the result in CLResult. The IR after
512 // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
513 // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
514 // but there is no such edge.
516 void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;
518 // Rewrite the iteration space of the loop denoted by (LS, Preheader). The
519 // iteration space of the rewritten loop ends at ExitLoopAt. The start of the
520 // iteration space is not changed. `ExitLoopAt' is assumed to be slt
521 // `OriginalHeaderCount'.
523 // If there are iterations left to execute, control is made to jump to
524 // `ContinuationBlock', otherwise they take the normal loop exit. The
525 // returned `RewrittenRangeInfo' object is populated as follows:
527 // .PseudoExit is a basic block that unconditionally branches to
528 // `ContinuationBlock'.
530 // .ExitSelector is a basic block that decides, on exit from the loop,
531 // whether to branch to the "true" exit or to `PseudoExit'.
533 // .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value
534 // for each PHINode in the loop header on taking the pseudo exit.
536 // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
537 // preheader because it is made to branch to the loop header only
541 changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
543 BasicBlock *ContinuationBlock) const;
545 // The loop denoted by `LS' has `OldPreheader' as its preheader. This
546 // function creates a new preheader for `LS' and returns it.
548 BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
549 const char *Tag) const;
551 // `ContinuationBlockAndPreheader' was the continuation block for some call to
552 // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'.
553 // This function rewrites the PHI nodes in `LS.Header' to start with the
555 void rewriteIncomingValuesForPHIs(
556 LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
557 const LoopConstrainer::RewrittenRangeInfo &RRI) const;
559 // Even though we do not preserve any passes at this time, we at least need to
560 // keep the parent loop structure consistent. The `LPPassManager' seems to
561 // verify this after running a loop pass. This function adds the list of
562 // blocks denoted by BBs to this loops parent loop if required.
563 void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);
565 // Some global state.
570 // Information about the original loop we started out with.
572 LoopInfo &OriginalLoopInfo;
573 const SCEV *LatchTakenCount;
574 BasicBlock *OriginalPreheader;
576 // The preheader of the main loop. This may or may not be different from
577 // `OriginalPreheader'.
578 BasicBlock *MainLoopPreheader;
580 // The range we need to run the main loop in.
581 InductiveRangeCheck::Range Range;
583 // The structure of the main loop (see comment at the beginning of this class
585 LoopStructure MainLoopStructure;
588 LoopConstrainer(Loop &L, LoopInfo &LI, const LoopStructure &LS,
589 ScalarEvolution &SE, InductiveRangeCheck::Range R)
590 : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
591 SE(SE), OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr),
592 OriginalPreheader(nullptr), MainLoopPreheader(nullptr), Range(R),
593 MainLoopStructure(LS) {}
595 // Entry point for the algorithm. Returns true on success.
601 void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
602 BasicBlock *ReplaceBy) {
603 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
604 if (PN->getIncomingBlock(i) == Block)
605 PN->setIncomingBlock(i, ReplaceBy);
608 static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
610 APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
611 return SE.getSignedRange(S).contains(SMax) &&
612 SE.getUnsignedRange(S).contains(SMax);
615 static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
617 APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
618 return SE.getSignedRange(S).contains(SMin) &&
619 SE.getUnsignedRange(S).contains(SMin);
622 Optional<LoopStructure>
623 LoopStructure::parseLoopStructure(ScalarEvolution &SE, BranchProbabilityInfo &BPI,
624 Loop &L, const char *&FailureReason) {
625 assert(L.isLoopSimplifyForm() && "should follow from addRequired<>");
627 BasicBlock *Latch = L.getLoopLatch();
628 if (!L.isLoopExiting(Latch)) {
629 FailureReason = "no loop latch";
633 BasicBlock *Header = L.getHeader();
634 BasicBlock *Preheader = L.getLoopPreheader();
636 FailureReason = "no preheader";
640 BranchInst *LatchBr = dyn_cast<BranchInst>(&*Latch->rbegin());
641 if (!LatchBr || LatchBr->isUnconditional()) {
642 FailureReason = "latch terminator not conditional branch";
646 unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;
648 BranchProbability ExitProbability =
649 BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);
651 if (ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
652 FailureReason = "short running loop, not profitable";
656 ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
657 if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
658 FailureReason = "latch terminator branch not conditional on integral icmp";
662 const SCEV *LatchCount = SE.getExitCount(&L, Latch);
663 if (isa<SCEVCouldNotCompute>(LatchCount)) {
664 FailureReason = "could not compute latch count";
668 ICmpInst::Predicate Pred = ICI->getPredicate();
669 Value *LeftValue = ICI->getOperand(0);
670 const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
671 IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());
673 Value *RightValue = ICI->getOperand(1);
674 const SCEV *RightSCEV = SE.getSCEV(RightValue);
676 // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
677 if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
678 if (isa<SCEVAddRecExpr>(RightSCEV)) {
679 std::swap(LeftSCEV, RightSCEV);
680 std::swap(LeftValue, RightValue);
681 Pred = ICmpInst::getSwappedPredicate(Pred);
683 FailureReason = "no add recurrences in the icmp";
688 auto IsInductionVar = [&SE](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
692 IntegerType *Ty = cast<IntegerType>(AR->getType());
693 IntegerType *WideTy =
694 IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);
696 // Currently we only work with induction variables that have been proved to
697 // not wrap. This restriction can potentially be lifted in the future.
699 const SCEVAddRecExpr *ExtendAfterOp =
700 dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
704 const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
705 const SCEV *ExtendedStep =
706 SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);
708 bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
709 ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;
714 if (const SCEVConstant *StepExpr =
715 dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
716 ConstantInt *StepCI = StepExpr->getValue();
717 if (StepCI->isOne() || StepCI->isMinusOne()) {
718 IsIncreasing = StepCI->isOne();
726 // `ICI` is interpreted as taking the backedge if the *next* value of the
727 // induction variable satisfies some constraint.
729 const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
730 bool IsIncreasing = false;
731 if (!IsInductionVar(IndVarNext, IsIncreasing)) {
732 FailureReason = "LHS in icmp not induction variable";
736 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
737 // TODO: generalize the predicates here to also match their unsigned variants.
739 bool FoundExpectedPred =
740 (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
741 (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);
743 if (!FoundExpectedPred) {
744 FailureReason = "expected icmp slt semantically, found something else";
748 if (LatchBrExitIdx == 0) {
749 if (CanBeSMax(SE, RightSCEV)) {
750 // TODO: this restriction is easily removable -- we just have to
751 // remember that the icmp was an slt and not an sle.
752 FailureReason = "limit may overflow when coercing sle to slt";
756 IRBuilder<> B(&*Preheader->rbegin());
757 RightValue = B.CreateAdd(RightValue, One);
761 bool FoundExpectedPred =
762 (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
763 (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);
765 if (!FoundExpectedPred) {
766 FailureReason = "expected icmp sgt semantically, found something else";
770 if (LatchBrExitIdx == 0) {
771 if (CanBeSMin(SE, RightSCEV)) {
772 // TODO: this restriction is easily removable -- we just have to
773 // remember that the icmp was an sgt and not an sge.
774 FailureReason = "limit may overflow when coercing sge to sgt";
778 IRBuilder<> B(&*Preheader->rbegin());
779 RightValue = B.CreateSub(RightValue, One);
783 const SCEV *StartNext = IndVarNext->getStart();
784 const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
785 const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);
787 BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);
789 assert(SE.getLoopDisposition(LatchCount, &L) ==
790 ScalarEvolution::LoopInvariant &&
791 "loop variant exit count doesn't make sense!");
793 assert(!L.contains(LatchExit) && "expected an exit block!");
794 const DataLayout &DL = Preheader->getModule()->getDataLayout();
795 Value *IndVarStartV =
796 SCEVExpander(SE, DL, "irce")
797 .expandCodeFor(IndVarStart, IndVarTy, &*Preheader->rbegin());
798 IndVarStartV->setName("indvar.start");
800 LoopStructure Result;
803 Result.Header = Header;
804 Result.Latch = Latch;
805 Result.LatchBr = LatchBr;
806 Result.LatchExit = LatchExit;
807 Result.LatchBrExitIdx = LatchBrExitIdx;
808 Result.IndVarStart = IndVarStartV;
809 Result.IndVarNext = LeftValue;
810 Result.IndVarIncreasing = IsIncreasing;
811 Result.LoopExitAt = RightValue;
813 FailureReason = nullptr;
818 Optional<LoopConstrainer::SubRanges>
819 LoopConstrainer::calculateSubRanges() const {
820 IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());
822 if (Range.getType() != Ty)
825 LoopConstrainer::SubRanges Result;
827 // I think we can be more aggressive here and make this nuw / nsw if the
828 // addition that feeds into the icmp for the latch's terminating branch is nuw
829 // / nsw. In any case, a wrapping 2's complement addition is safe.
830 ConstantInt *One = ConstantInt::get(Ty, 1);
831 const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
832 const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);
834 bool Increasing = MainLoopStructure.IndVarIncreasing;
835 // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
836 // range of values the induction variable takes.
837 const SCEV *Smallest =
838 Increasing ? Start : SE.getAddExpr(End, SE.getSCEV(One));
839 const SCEV *Greatest =
840 Increasing ? End : SE.getAddExpr(Start, SE.getSCEV(One));
842 auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
843 return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
846 // In some cases we can prove that we don't need a pre or post loop
848 bool ProvablyNoPreloop =
849 SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
850 if (!ProvablyNoPreloop)
851 Result.LowLimit = Clamp(Range.getBegin());
853 bool ProvablyNoPostLoop =
854 SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
855 if (!ProvablyNoPostLoop)
856 Result.HighLimit = Clamp(Range.getEnd());
861 void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
862 const char *Tag) const {
863 for (BasicBlock *BB : OriginalLoop.getBlocks()) {
864 BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
865 Result.Blocks.push_back(Clone);
866 Result.Map[BB] = Clone;
869 auto GetClonedValue = [&Result](Value *V) {
870 assert(V && "null values not in domain!");
871 auto It = Result.Map.find(V);
872 if (It == Result.Map.end())
874 return static_cast<Value *>(It->second);
877 Result.Structure = MainLoopStructure.map(GetClonedValue);
878 Result.Structure.Tag = Tag;
880 for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
881 BasicBlock *ClonedBB = Result.Blocks[i];
882 BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];
884 assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");
886 for (Instruction &I : *ClonedBB)
887 RemapInstruction(&I, Result.Map,
888 RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);
890 // Exit blocks will now have one more predecessor and their PHI nodes need
891 // to be edited to reflect that. No phi nodes need to be introduced because
892 // the loop is in LCSSA.
894 for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB);
895 SBBI != SBBE; ++SBBI) {
897 if (OriginalLoop.contains(*SBBI))
898 continue; // not an exit block
900 for (Instruction &I : **SBBI) {
901 if (!isa<PHINode>(&I))
904 PHINode *PN = cast<PHINode>(&I);
905 Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
906 PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
912 LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
913 const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
914 BasicBlock *ContinuationBlock) const {
916 // We start with a loop with a single latch:
918 // +--------------------+
922 // +--------+-----------+
923 // | ----------------\
925 // +--------v----v------+ |
929 // +--------------------+ |
933 // +--------------------+ |
935 // | latch >----------/
937 // +-------v------------+
940 // | +--------------------+
942 // +---> original exit |
944 // +--------------------+
946 // We change the control flow to look like
949 // +--------------------+
951 // | preheader >-------------------------+
953 // +--------v-----------+ |
954 // | /-------------+ |
956 // +--------v--v--------+ | |
958 // | header | | +--------+ |
960 // +--------------------+ | | +-----v-----v-----------+
962 // | | | .pseudo.exit |
964 // | | +-----------v-----------+
967 // | | +--------v-------------+
968 // +--------------------+ | | | |
969 // | | | | | ContinuationBlock |
970 // | latch >------+ | | |
971 // | | | +----------------------+
972 // +---------v----------+ |
975 // | +---------------^-----+
977 // +-----> .exit.selector |
979 // +----------v----------+
981 // +--------------------+ |
983 // | original exit <----+
985 // +--------------------+
988 RewrittenRangeInfo RRI;
990 auto BBInsertLocation = std::next(Function::iterator(LS.Latch));
991 RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
992 &F, BBInsertLocation);
993 RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
996 BranchInst *PreheaderJump = cast<BranchInst>(&*Preheader->rbegin());
997 bool Increasing = LS.IndVarIncreasing;
999 IRBuilder<> B(PreheaderJump);
1001 // EnterLoopCond - is it okay to start executing this `LS'?
1002 Value *EnterLoopCond = Increasing
1003 ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
1004 : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);
1006 B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
1007 PreheaderJump->eraseFromParent();
1009 LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
1010 B.SetInsertPoint(LS.LatchBr);
1011 Value *TakeBackedgeLoopCond =
1012 Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
1013 : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
1014 Value *CondForBranch = LS.LatchBrExitIdx == 1
1015 ? TakeBackedgeLoopCond
1016 : B.CreateNot(TakeBackedgeLoopCond);
1018 LS.LatchBr->setCondition(CondForBranch);
1020 B.SetInsertPoint(RRI.ExitSelector);
1022 // IterationsLeft - are there any more iterations left, given the original
1023 // upper bound on the induction variable? If not, we branch to the "real"
1025 Value *IterationsLeft = Increasing
1026 ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
1027 : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
1028 B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);
1030 BranchInst *BranchToContinuation =
1031 BranchInst::Create(ContinuationBlock, RRI.PseudoExit);
1033 // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
1034 // each of the PHI nodes in the loop header. This feeds into the initial
1035 // value of the same PHI nodes if/when we continue execution.
1036 for (Instruction &I : *LS.Header) {
1037 if (!isa<PHINode>(&I))
1040 PHINode *PN = cast<PHINode>(&I);
1042 PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
1043 BranchToContinuation);
1045 NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
1046 NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
1048 RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
1051 RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
1052 BranchToContinuation);
1053 RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
1054 RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);
1056 // The latch exit now has a branch from `RRI.ExitSelector' instead of
1057 // `LS.Latch'. The PHI nodes need to be updated to reflect that.
1058 for (Instruction &I : *LS.LatchExit) {
1059 if (PHINode *PN = dyn_cast<PHINode>(&I))
1060 replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
1068 void LoopConstrainer::rewriteIncomingValuesForPHIs(
1069 LoopStructure &LS, BasicBlock *ContinuationBlock,
1070 const LoopConstrainer::RewrittenRangeInfo &RRI) const {
1072 unsigned PHIIndex = 0;
1073 for (Instruction &I : *LS.Header) {
1074 if (!isa<PHINode>(&I))
1077 PHINode *PN = cast<PHINode>(&I);
1079 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
1080 if (PN->getIncomingBlock(i) == ContinuationBlock)
1081 PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
1084 LS.IndVarStart = RRI.IndVarEnd;
1087 BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
1088 BasicBlock *OldPreheader,
1089 const char *Tag) const {
1091 BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
1092 BranchInst::Create(LS.Header, Preheader);
1094 for (Instruction &I : *LS.Header) {
1095 if (!isa<PHINode>(&I))
1098 PHINode *PN = cast<PHINode>(&I);
1099 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
1100 replacePHIBlock(PN, OldPreheader, Preheader);
// Register the given basic blocks as members of the original loop's parent
// loop (if any), keeping LoopInfo consistent after new blocks were created.
//
// NOTE(review): the early `return' taken when ParentLoop is null is elided
// from this chunk -- ParentLoop is dereferenced below, so that guard must
// exist in the full file; verify before editing.
1106 void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
1107   Loop *ParentLoop = OriginalLoop.getParentLoop();
1111   for (BasicBlock *BB : BBs)
1112     ParentLoop->addBasicBlockToLoop(BB, OriginalLoopInfo);
// Top-level driver: splits the original loop's iteration space into up to
// three loops (preloop / main loop / postloop) so that the main loop runs
// only over the computed safe sub-range.  Returns true on change.
//
// NOTE(review): this chunk is heavily elided -- the early `return false'
// paths, the `if (Increasing) ... else ...' headers around the limit
// computations, the `bool NeedsPreLoop =' and `IntegerType *IVTy ='
// declaration lines, and several closing braces are missing here.  The
// comments below describe only what the visible lines establish.
1115 bool LoopConstrainer::run() {
1116   BasicBlock *Preheader = nullptr;
// The latch-taken count and a preheader must already have been validated by
// the caller; the assert below enforces both preconditions.
1117   LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
1118   Preheader = OriginalLoop.getLoopPreheader();
1119   assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
1122   OriginalPreheader = Preheader;
1123   MainLoopPreheader = Preheader;
// Compute the [low, high) sub-ranges that bound the safe iteration space;
// bail out (elided here) if they cannot be determined.
1125   Optional<SubRanges> MaybeSR = calculateSubRanges();
1126   if (!MaybeSR.hasValue()) {
1127     DEBUG(dbgs() << "irce: could not compute subranges\n");
1131   SubRanges SR = MaybeSR.getValue();
1132   bool Increasing = MainLoopStructure.IndVarIncreasing;
1134       cast<IntegerType>(MainLoopStructure.IndVarNext->getType());
// All expanded limit computations are materialized in the original
// preheader, before its terminator.
1136   SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
1137   Instruction *InsertPt = OriginalPreheader->getTerminator();
1139   // It would have been better to make `PreLoop' and `PostLoop'
1140   // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
1142   ClonedLoop PreLoop, PostLoop;
// For an increasing IV the preloop covers the range below LowLimit and the
// postloop the range above HighLimit; for a decreasing IV the roles swap.
1144       Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
1145   bool NeedsPostLoop =
1146       Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();
1148   Value *ExitPreLoopAt = nullptr;
1149   Value *ExitMainLoopAt = nullptr;
1150   const SCEVConstant *MinusOneS =
1151       cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));
1154     const SCEV *ExitPreLoopAtSCEV = nullptr;
1157       ExitPreLoopAtSCEV = *SR.LowLimit;
// Decreasing case: the exit limit is HighLimit - 1, which is only sound if
// HighLimit cannot be the signed minimum (subtracting 1 would wrap).
1159       if (CanBeSMin(SE, *SR.HighLimit)) {
1160         DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
1161                      << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
1165         ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
1168     ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
1169     ExitPreLoopAt->setName("exit.preloop.at");
1172   if (NeedsPostLoop) {
1173     const SCEV *ExitMainLoopAtSCEV = nullptr;
1176       ExitMainLoopAtSCEV = *SR.HighLimit;
// Same SMin-wrap guard as above, for the main loop's exit limit.
1178       if (CanBeSMin(SE, *SR.LowLimit)) {
1179         DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
1180                      << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
1184         ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
1187     ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
1188     ExitMainLoopAt->setName("exit.mainloop.at");
1191   // We clone these ahead of time so that we don't have to deal with changing
1192   // and temporarily invalid IR as we transform the loops.
1194     cloneLoop(PreLoop, "preloop");
1196     cloneLoop(PostLoop, "postloop");
1198   RewrittenRangeInfo PreLoopRRI;
// Splice the preloop in front: the original preheader now branches to the
// preloop's header instead of the main loop's header.
1201     Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
1202                                                   PreLoop.Structure.Header);
1205         createPreheader(MainLoopStructure, Preheader, "mainloop");
1206     PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
1207                                          ExitPreLoopAt, MainLoopPreheader);
1208     rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
1212   BasicBlock *PostLoopPreheader = nullptr;
1213   RewrittenRangeInfo PostLoopRRI;
// Splice the postloop behind: the main loop now exits into the postloop's
// preheader once it reaches ExitMainLoopAt.
1215   if (NeedsPostLoop) {
1217         createPreheader(PostLoop.Structure, Preheader, "postloop");
1218     PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
1219                                           ExitMainLoopAt, PostLoopPreheader);
1220     rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
// A fresh main-loop preheader was only created when a preloop was spliced
// in; otherwise MainLoopPreheader is still the original preheader and must
// not be re-registered with the parent loop.
1224   BasicBlock *NewMainLoopPreheader =
1225       MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
1226   BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
1227                              PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
1228                              PostLoopRRI.ExitSelector, NewMainLoopPreheader};
1230   // Some of the above may be nullptr, filter them out before passing to
1231   // addToParentLoopIfNeeded.
1233       std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);
1235   addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));
1236   addToParentLoopIfNeeded(PreLoop.Blocks);
1237   addToParentLoopIfNeeded(PostLoop.Blocks);
1242 /// Computes and returns a range of values for the induction variable (IndVar)
1243 /// in which the range check can be safely elided. If it cannot compute such a
1244 /// range, returns None.
///
/// NOTE(review): this chunk is elided -- the `return None;' early exits that
/// must follow the dyn_cast/affine checks below, plus the surrounding `if'
/// bodies, are not visible here; verify against the full file.
1245 Optional<InductiveRangeCheck::Range>
1246 InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
1247                                                const SCEVAddRecExpr *IndVar,
1248                                                IRBuilder<> &) const {
1249   // IndVar is of the form "A + B * I" (where "I" is the canonical induction
1250   // variable, that may or may not exist as a real llvm::Value in the loop) and
1251   // this inductive range check is a range check on the "C + D * I" ("C" is
1252   // getOffset() and "D" is getScale()). We rewrite the value being range
1253   // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - NA".
1254   // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
1255   // can be generalized as needed.
1257   // The actual inequalities we solve are of the form
1259   //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
1261   // The inequality is satisfied by -M <= IndVar < (L - M) [^1]. All additions
1262   // and subtractions are twos-complement wrapping and comparisons are signed.
1266   // If there exists IndVar such that -M <= IndVar < (L - M) then it follows
1267   // that -M <= (-M + L) [== Eq. 1]. Since L >= 0, if (-M + L) sign-overflows
1268   // then (-M + L) < (-M). Hence by [Eq. 1], (-M + L) could not have
1271   // This means IndVar = t + (-M) for t in [0, L). Hence (IndVar + M) = t.
1272   // Hence 0 <= (IndVar + M) < L
1274   // [^1]: Note that the solution does _not_ apply if L < 0; consider values M =
1275   // 127, IndVar = 126 and L = -2 in an i8 world.
// Only affine recurrences ("A + B * I") are handled.
1277   if (!IndVar->isAffine())
1280   const SCEV *A = IndVar->getStart();
// The step ("B") must be a compile-time constant for the B == D check.
1281   const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
1285   const SCEV *C = getOffset();
1286   const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
// Per the comment above, only scales of exactly +1 or -1 are supported.
1290   ConstantInt *ConstD = D->getValue();
1291   if (!(ConstD->isMinusOne() || ConstD->isOne()))
1294   const SCEV *M = SE.getMinusSCEV(C, A);
// Safe range is [-M, L - M) as derived in the header comment.
1296   const SCEV *Begin = SE.getNegativeSCEV(M);
1297   const SCEV *End = SE.getMinusSCEV(SE.getSCEV(getLength()), M);
1299   return InductiveRangeCheck::Range(Begin, End);
// Intersect an (optional) accumulated range `R1' with `R2', returning the
// signed intersection [smax(begins), smin(ends)).
//
// NOTE(review): the fast path taken when `R1' is empty (presumably returning
// `R2' unmodified) is elided from this chunk, as is the `return None' for
// the type-mismatch case -- verify against the full file.
1302 static Optional<InductiveRangeCheck::Range>
1303 IntersectRange(ScalarEvolution &SE,
1304                const Optional<InductiveRangeCheck::Range> &R1,
1305                const InductiveRangeCheck::Range &R2, IRBuilder<> &B) {
1308   auto &R1Value = R1.getValue();
1310   // TODO: we could widen the smaller range and have this work; but for now we
1311   // bail out to keep things simple.
1312   if (R1Value.getType() != R2.getType())
// Signed max of the lower bounds and signed min of the upper bounds give
// the tightest range contained in both inputs.
1315   const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
1316   const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());
1318   return InductiveRangeCheck::Range(NewBegin, NewEnd);
// LoopPass entry point: collect the loop's inductive range checks, intersect
// their safe iteration spaces, constrain the loop to that space via
// LoopConstrainer, and fold the now-redundant checks to constants.
//
// NOTE(review): this chunk is elided -- the early `return false' statements
// (size cutoff, no preheader, no range checks, unparseable structure, empty
// safe range), the `if (!Changed) return false;' style guard before the
// debug printing, and the final `return Changed;' are not visible here.
1321 bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
// Cost guard: don't triple the code size of an already-large loop.
1322   if (L->getBlocks().size() >= LoopSizeCutoff) {
1323     DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
1327   BasicBlock *Preheader = L->getLoopPreheader();
1329     DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
1333   LLVMContext &Context = Preheader->getContext();
1334   InductiveRangeCheck::AllocatorTy IRCAlloc;
1335   SmallVector<InductiveRangeCheck *, 16> RangeChecks;
1336   ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
1337   BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
// Scan every block's terminator; conditional branches that look like
// inductive range checks (per InductiveRangeCheck::create) are collected.
1339   for (auto BBI : L->getBlocks())
1340     if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
1341       if (InductiveRangeCheck *IRC =
1342           InductiveRangeCheck::create(IRCAlloc, TBI, L, SE, BPI))
1343         RangeChecks.push_back(IRC);
1345   if (RangeChecks.empty())
1348   DEBUG(dbgs() << "irce: looking at loop "; L->print(dbgs());
1349         dbgs() << "irce: loop has " << RangeChecks.size()
1350                << " inductive range checks: \n";
1351         for (InductiveRangeCheck *IRC : RangeChecks)
1355   const char *FailureReason = nullptr;
1356   Optional<LoopStructure> MaybeLoopStructure =
1357       LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
1358   if (!MaybeLoopStructure.hasValue()) {
1359     DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
1363   LoopStructure LS = MaybeLoopStructure.getValue();
1364   bool Increasing = LS.IndVarIncreasing;
// IndVarNext is the post-increment value; adding -1 (or +1 for a decreasing
// IV) recovers the add-rec for the in-loop ("current") induction variable.
1365   const SCEV *MinusOne =
1366       SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
1367   const SCEVAddRecExpr *IndVar =
1368       cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));
1370   Optional<InductiveRangeCheck::Range> SafeIterRange;
1371   Instruction *ExprInsertPt = Preheader->getTerminator();
1373   SmallVector<InductiveRangeCheck *, 4> RangeChecksToEliminate;
1375   IRBuilder<> B(ExprInsertPt);
// Greedily intersect each check's safe range into SafeIterRange; a check is
// only marked for elimination if the intersection stays non-degenerate.
1376   for (InductiveRangeCheck *IRC : RangeChecks) {
1377     auto Result = IRC->computeSafeIterationSpace(SE, IndVar, B);
1378     if (Result.hasValue()) {
1379       auto MaybeSafeIterRange =
1380           IntersectRange(SE, SafeIterRange, Result.getValue(), B);
1381       if (MaybeSafeIterRange.hasValue()) {
1382         RangeChecksToEliminate.push_back(IRC);
1383         SafeIterRange = MaybeSafeIterRange.getValue();
1388   if (!SafeIterRange.hasValue())
// LoopConstrainer does the actual CFG surgery (preloop/mainloop/postloop).
1391   LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LS,
1392                      SE, SafeIterRange.getValue());
1393   bool Changed = LC.run();
1396     auto PrintConstrainedLoopInfo = [L]() {
1397       dbgs() << "irce: in function ";
1398       dbgs() << L->getHeader()->getParent()->getName() << ": ";
1399       dbgs() << "constrained ";
1403     DEBUG(PrintConstrainedLoopInfo());
1405     if (PrintChangedLoops)
1406       PrintConstrainedLoopInfo();
1408     // Optimize away the now-redundant range checks.
// Inside the constrained main loop each collected check always passes, so
// its branch condition folds to the constant matching its passing direction;
// later passes delete the dead arm.
1410     for (InductiveRangeCheck *IRC : RangeChecksToEliminate) {
1411       ConstantInt *FoldedRangeCheck = IRC->getPassingDirection()
1412                                       ? ConstantInt::getTrue(Context)
1413                                       : ConstantInt::getFalse(Context);
1414       IRC->getBranch()->setCondition(FoldedRangeCheck);
// Public factory for the IRCE pass (declared in llvm/Transforms/Scalar.h);
// the caller takes ownership of the returned pass.
// NOTE(review): the function's closing brace is elided from this chunk.
1421 Pass *llvm::createInductiveRangeCheckEliminationPass() {
1422   return new InductiveRangeCheckElimination;