//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs strength reduction on array references inside loops that
// have the loop induction variable as one or more of their components; it
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
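//
// For example (an illustrative sketch, not code from this pass), a loop that
// addresses A[i] on each iteration:
//
//   for (i = 0; i < n; ++i)
//     A[i] = 0;               // address = A + i*4; a multiply per iteration
//
// can be rewritten to stride a pointer induction variable directly:
//
//   for (p = A; p != A + n; ++p)
//     *p = 0;                 // the multiply is strength-reduced to an add
//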
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,   "Number of loop terminating conds optimized");
STATISTIC(NumCountZero,  "Number of count IVs optimized to count toward zero");
static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);
namespace {

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct IVExpr {
    const SCEV *Stride;
    const SCEV *Base;
    PHINode    *PHI;

    IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions
  /// inserted during StrengthReduceStridedIVUsers for a particular stride of
  /// the IV.
  struct IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };
  class LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    LoopInfo *LI;
    ScalarEvolution *SE;
    bool Changed;         // Set to true when we change anything.

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<const SCEV *, IVsOfOneStride> IVsByStride;

    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
    /// reused (nor should they be rewritten to reuse other strides).
    SmallSet<const SCEV *, 4> StrideNoReuse;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer of a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {}

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG.  However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved("domfrontier");
      AU.addPreserved("domtree");

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    void OptimizeIndvars(Loop *L);

    /// OptimizeLoopTermCond - Change loop terminating condition to use the
    /// postinc iv when possible.
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);

    /// OptimizeMax - Rewrite the loop's terminating condition
    /// if it uses a max computation.
    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
                          IVStrideUse* &CondUse);

    /// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for
    /// deciding when to exit the loop is used only for that purpose, try to
    /// rearrange things so it counts down to a test against zero.
    bool OptimizeLoopCountIV(Loop *L);
    bool OptimizeLoopCountIVOfStride(const SCEV* &Stride,
                                     IVStrideUse* &CondUse, Loop *L);

    /// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a
    /// single stride of IV.  All of the users may have different starting
    /// values, and this may not be the only stride.
    void StrengthReduceIVUsersOfStride(const SCEV *const &Stride,
                                       IVUsersOfOneStride &Uses,
                                       Loop *L);
    void StrengthReduceIVUsers(Loop *L);
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEV* &CondStride,
                                  bool PostPass = false);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEV* &CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&,
                                IVExpr&, const Type*,
                                const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    const SCEV *CollectIVUsers(const SCEV *const &Stride,
                               IVUsersOfOneStride &Uses,
                               Loop *L,
                               bool &AllUsesAreAddresses,
                               bool &AllUsesAreOutsideLoop,
                               std::vector<BasedUser> &UsersToProcess);
    bool StrideMightBeShared(const SCEV *Stride, Loop *L, bool CheckPreInc);
    bool ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV *Stride);
    void PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV *Stride,
                                const SCEV *CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                                         std::vector<BasedUser> &UsersToProcess,
                                         Value *CommonBaseV,
                                         const IVExpr &ReuseIV,
                                         Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                                  std::vector<BasedUser> &UsersToProcess,
                                  const SCEV *Stride,
                                  const SCEV *CommonExprs,
                                  Value *CommonBaseV,
                                  Instruction *IVIncInsertPt,
                                  const Loop *L,
                                  SCEVExpander &PreheaderRewriter);

    void DeleteTriviallyDeadInstructions();
  };
}
char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }
}
/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L.  An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
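///
/// For example (hypothetical SCEVs): with sibling loops L1 and L2, the
/// expression {0,+,4}<L2> involves an AddRec from a different loop when
/// queried with L == L1, but {0,+,4}<L1> does not, and neither does an
/// AddRec for a loop that encloses L1.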
static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
  // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // if newLoop is an outer loop of L, this is OK.
      if (!LoopInfo::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }
  return AccessTy;
}
namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use.  As the use is processed, information gets moved from this
    /// field to the Imm field (below).  BasedUser values are sorted by this
    /// field.
    const SCEV *Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction.  This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    const SCEV *Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                        Instruction *InsertPt,
                                       SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        SmallVectorImpl<WeakVH> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP);
    void dump() const;
  };
}
void BasedUser::dump() const {
  errs() << " Base=" << *Base;
  errs() << " Imm=" << *Imm;
  errs() << "   Inst: " << *Inst;
}
Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP) {
  Value *Base = Rewriter.expandCodeFor(NewBase, 0, IP);

  // Wrap the base in a SCEVUnknown so that ScalarEvolution doesn't try to
  // re-analyze it.
  const SCEV *NewValSCEV = SE->getUnknown(Base);

  // Always emit the immediate into the same block as the user.
  NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.  NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<WeakVH> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users.  Because of this, we
    // want to emit code for the computation of the operand right before its
    // old computation.  This is usually safe, because we obviously used to use
    // the computation when it was computed in its current block.  However, in
    // some cases (e.g. use of a post-incremented induction variable) the
    // NewBase value will be pinned to live somewhere after the original
    // computation.  In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DEBUG(errs() << "      Replacing with ");
    DEBUG(WriteAsOperand(errs(), NewVal, /*PrintType=*/false));
    DEBUG(errs() << ", which has value " << *NewBase << " plus IMM "
                 << *Imm << "\n");
    return;
  }
  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it.  Note that PHI nodes can
  // have multiple entries for the same predecessor.  We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI.  This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI.  We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case.
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      BasicBlock *PHIPred = PN->getIncomingBlock(i);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths.  We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            !isa<IndirectBrInst>(PHIPred->getTerminator()) &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          BasicBlock *NewBB = SplitCriticalEdge(PHIPred, PN->getParent(),
                                                P, false);

          // Next step: move the basic block.  In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent()))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          PHIPred = NewBB;
          i = PN->getBasicBlockIndex(PHIPred);
        }
      }
      Value *&Code = InsertedCode[PHIPred];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PHIPred->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt);

        DEBUG(errs() << "      Changing PHI use to ");
        DEBUG(WriteAsOperand(errs(), Code, /*PrintType=*/false));
        DEBUG(errs() << ", which has value " << *NewBase << " plus IMM "
                     << *Imm << "\n");
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}
/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
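///
/// For example (illustrative, target-dependent): on a target whose loads
/// accept [reg + imm] addressing, a small SCEVConstant such as 42 fits here
/// and costs nothing extra, while on a target with no global-address form a
/// global-valued SCEVUnknown would not fit and must occupy a register.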
static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, AccessTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, AccessTy);
      }
      // Default: assume global addresses are not legal.
    }

  return false;
}
/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
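///
/// For example (a sketch with hypothetical values): if Val is "A + B" where
/// A is loop invariant but B is computed inside L, then B is added into Imm
/// and Val is reduced to "A", keeping the base expression emittable in the
/// preheader.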
static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *AccessTy,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      const SCEV *NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress &&
        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
      const SCEV *NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this.
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8".  If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}
static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *AccessTy = getAccessType(User);
  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
}
/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together.  This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
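///
/// For example (illustrative): "A + B + {0,+,S}<L>" separates into the
/// subexpressions A, B, and {0,+,S}<L>; an addrec with a nonzero start is
/// split into a zero-based addrec plus the separated pieces of its start.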
static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
                             const SCEV *Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}
// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything.  This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression.  The common expression
/// is *removed* from the Bases and returned.
static const SCEV *
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use?  This is a very common case, so we handle it specially and
  // cheaply.
  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  const SCEV *Result = Zero;
  const SCEV *FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's.  If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }
  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  SmallVector<const SCEV *, 16> UniqueSubExprs;

  SmallVector<const SCEV *, 16> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV).  We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the AccessTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *AccessTy = 0;
    if (isAddrUse)
      AccessTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree.  We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result.  Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<const SCEV *, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }
  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop.  However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers).  Check this.
    for (unsigned i=0; i<NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *AccessTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result!=Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not.  There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData.  This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<const SCEV *, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }
  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does.  Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
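///
/// For example (illustrative): Scale == 4 asks, for every load and store in
/// UsersToProcess, whether an address of the form [base + imm + reg*4] is
/// legal on the target; a single illegal user invalidates the whole scale.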
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
        Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                                     int64_t Offset,
                                     int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
        Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// trivial (i.e. would require inserting conversion code).
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}
/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible.  Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within.  Well, usually.
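///
/// For example (a sketch with made-up strides): if a stride-4 IV already
/// exists and the current stride is 8, the factor 2 is returned when
/// [base + reg*2] addressing is legal for every user, letting each stride-8
/// user be rewritten in terms of the existing IV as "iv4 * 2".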
const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEV *const &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (StrideNoReuse.count(Stride))
    return SE->getIntegerSCEV(0, Stride->getType());

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
          StrideNoReuse.count(SI->first))
        continue;
      // The other stride has no uses, don't reuse it.
      std::map<const SCEV *, IVUsersOfOneStride *>::iterator UI =
        IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
      if (UI->second->Users.empty())
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is really really right to substitute
    // an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return SI->first;
        }
    }
    // Special case, old IV is -1*x and this one is x.  Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }

  return SE->getIntegerSCEV(0, Stride->getType());
}
/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *const &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
/// CollectIVUsers - Transform our list of users and offsets to a bit more
/// complex table. In this new vector, each 'BasedUser' contains 'Base', the
/// base of the strided accesses, as well as the old information from Uses. We
/// progressively move information from the Base field to the Imm field, until
/// we eventually have the full access expression to rewrite the use.
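///
/// For example (illustrative bases): users with bases "A+B+4" and "A+B+8"
/// yield the common expression "A+B", which is returned for emission in the
/// preheader, while the residual constants 4 and 8 migrate into the per-use
/// Imm fields.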
const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride,
                                               IVUsersOfOneStride &Uses,
                                               Loop *L,
                                               bool &AllUsesAreAddresses,
                                               bool &AllUsesAreOutsideLoop,
                                       std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
       E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases.  We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible.  Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  const SCEV *CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.  We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV.  Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores.  Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}
/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
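///
/// For example (a sketch): instead of one shared IV plus a base register per
/// group, full mode gives users with bases B1 and B2 their own pointer IVs
/// {B1,+,S} and {B2,+,S}, trading extra increments in the latch for simpler
/// address operands.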
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV *Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *AccessTy = getAccessType(Inst);
        const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}
/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the step is negative, the increment is emitted as a subtract instead
/// of an add.
///
/// Return the created phi node.
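///
/// The emitted pattern is roughly (an IR sketch; the names match the ones
/// used below, values illustrative):
///
///   header:
///     %lsr.iv = phi [ %start, %preheader ], [ %lsr.iv.next, %latch ]
///   latch:
///     %lsr.iv.next = add %lsr.iv, %step    ; sub when the step is negative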
static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  const SCEV *IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}
static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first.  To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together.  By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't.  The problem is that
  // const SCEV *'s don't have a deterministic ordering w.r.t to each other, so
  // we don't have anything to do a '<' comparison on.  Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // pointer equality of the bases.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    const SCEV *Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}
/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV *Stride,
                                const SCEV *CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter) {
  DEBUG(errs() << "  Fully reducing all users\n");

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    const SCEV *Imm = UsersToProcess[i].Imm;
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
    do {
      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
      UsersToProcess[i].Phi = Phi;
      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
             "ShouldUseFullStrengthReductionMode should reject this!");
    } while (++i != e && Base == UsersToProcess[i].Base);
  }
}
/// FindIVIncInsertPt - Return the location to insert the increment instruction.
/// If the only use is a use of the postinc value (which must be the loop
/// termination condition), then insert it just before the use.
static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
                                      const Loop *L) {
  if (UsersToProcess.size() == 1 &&
      UsersToProcess[0].isUseOfPostIncrementedValue &&
      L->contains(UsersToProcess[0].Inst->getParent()))
    return UsersToProcess[0].Inst;
  return L->getLoopLatch()->getTerminator();
}
/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
/// given users to share.
///
void
LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                  std::vector<BasedUser> &UsersToProcess,
                                  const SCEV *Stride,
                                  const SCEV *CommonExprs,
                                  Value *CommonBaseV,
                                  Instruction *IVIncInsertPt,
                                  const Loop *L,
                                  SCEVExpander &PreheaderRewriter) {
  DEBUG(errs() << "  Inserting new PHI:\n");

  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
                                 Stride, IVIncInsertPt, L,
                                 PreheaderRewriter);

  // Remember this in case a later stride is a multiple of this.
  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);

  // All the users will share this new IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = Phi;

  DEBUG(errs() << "    IV=");
  DEBUG(WriteAsOperand(errs(), Phi, /*PrintType=*/false));
  DEBUG(errs() << "\n");
}
/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
/// reuse an induction variable with a stride that is a factor of the current
/// induction variable.
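///
/// For example (made-up numbers): users of a stride-8 IV can ride an
/// existing stride-4 IV that CheckForIVReuse found with RewriteFactor 2;
/// this routine only wires each user to the reused PHI and, if needed,
/// pins the common base value in the preheader.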
void
LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
                                         std::vector<BasedUser> &UsersToProcess,
                                         Value *CommonBaseV,
                                         const IVExpr &ReuseIV,
                                         Instruction *PreInsertPt) {
  DEBUG(errs() << "  Rewriting in terms of existing IV of STRIDE "
               << *ReuseIV.Stride << " and BASE " << *ReuseIV.Base << "\n");

  // All the users will share the reused IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = ReuseIV.PHI;

  Constant *C = dyn_cast<Constant>(CommonBaseV);
  if (C &&
      (!C->isNullValue() &&
       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
                          TLI, false)))
    // We want the common base emitted into the preheader!  This is just
    // using cast as a copy so BitCast (no-op cast) is appropriate.
    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                  "commonbase", PreInsertPt);
}
static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
                                    const Type *AccessTy,
                                    std::vector<BasedUser> &UsersToProcess,
                                    const TargetLowering *TLI) {
  SmallVector<Instruction*, 16> AddrModeInsts;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    if (UsersToProcess[i].isUseOfPostIncrementedValue)
      continue;
    ExtAddrMode AddrMode =
      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
                                   AccessTy, UsersToProcess[i].Inst,
                                   AddrModeInsts, *TLI);
    if (GV && GV != AddrMode.BaseGV)
      return false;
    if (Offset && !AddrMode.BaseOffs)
      // FIXME: How to accurately check whether the immediate offset is folded?
      return false;
    AddrModeInsts.clear();
  }
  return true;
}
/// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a single
/// stride of IV.  All of the users may have different starting values, and this
/// may not be the only stride.
void
LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride,
                                                  IVUsersOfOneStride &Uses,
                                                  Loop *L) {
  // If all the users are moved to another stride, then there is nothing to do.
  if (Uses.Users.empty())
    return;

  // Keep track if every use in UsersToProcess is an address. If they all are,
  // we may be able to rewrite the entire collection of them in terms of a
  // smaller-stride IV.
  bool AllUsesAreAddresses = true;

  // Keep track if every use of a single stride is outside the loop.  If so,
  // we want to be more aggressive about reusing a smaller-stride IV; a
  // multiply outside the loop is better than another IV inside.  Well, usually.
  bool AllUsesAreOutsideLoop = true;

  // Transform our list of users and offsets to a bit more complex table.  In
  // this new vector, each 'BasedUser' contains 'Base', the base of the
  // strided access, as well as the old information from Uses.  We progressively
  // move information from the Base field to the Imm field, until we eventually
  // have the full access expression to rewrite the use.
  std::vector<BasedUser> UsersToProcess;
  const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                           AllUsesAreOutsideLoop,
                                           UsersToProcess);
1524 // Sort the UsersToProcess array so that users with common bases are
1525 // next to each other.
1526 SortUsersToProcess(UsersToProcess);
1528 // If we managed to find some expressions in common, we'll need to carry
1529 // their value in a register and add it in for each use. This will take up
1530 // a register operand, which potentially restricts what stride values are
1532 bool HaveCommonExprs = !CommonExprs->isZero();
1533 const Type *ReplacedTy = CommonExprs->getType();
1535 // If all uses are addresses, consider sinking the immediate part of the
1536 // common expression back into uses if they can fit in the immediate fields.
1537 if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
1538 const SCEV *NewCommon = CommonExprs;
1539 const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
1540 MoveImmediateValues(TLI, Type::getVoidTy(
1541 L->getLoopPreheader()->getContext()),
1542 NewCommon, Imm, true, L, SE);
1543 if (!Imm->isZero()) {
1544 bool DoSink = true;
1546 // If the immediate part of the common expression is a GV, check if it's
1547 // possible to fold it into the target addressing mode.
1548 GlobalValue *GV = 0;
1549 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
1550 GV = dyn_cast<GlobalValue>(SU->getValue());
1551 int64_t Offset = 0;
1552 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
1553 Offset = SC->getValue()->getSExtValue();
1554 if (GV || Offset)
1555 // Pass VoidTy as the AccessTy to be conservative, because
1556 // there could be multiple access types among all the uses.
1557 DoSink = IsImmFoldedIntoAddrMode(GV, Offset,
1558 Type::getVoidTy(L->getLoopPreheader()->getContext()),
1559 UsersToProcess, TLI);
1561 if (DoSink) {
1562 DEBUG(errs() << "  Sinking " << *Imm << " back down into uses\n");
1563 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1564 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
1565 CommonExprs = NewCommon;
1566 HaveCommonExprs = !CommonExprs->isZero();
1567 ++NumImmSunk;
1568 }
1569 }
1570 }
1572 // Now that we know what we need to do, insert the PHI node itself.
1574 DEBUG(errs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1575 << *Stride << ":\n"
1576 << " Common base: " << *CommonExprs << "\n");
1578 SCEVExpander Rewriter(*SE);
1579 SCEVExpander PreheaderRewriter(*SE);
1581 BasicBlock *Preheader = L->getLoopPreheader();
1582 Instruction *PreInsertPt = Preheader->getTerminator();
1583 BasicBlock *LatchBlock = L->getLoopLatch();
1584 Instruction *IVIncInsertPt = LatchBlock->getTerminator();
1586 Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
1588 const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1589 IVExpr ReuseIV(SE->getIntegerSCEV(0,
1590 Type::getInt32Ty(Preheader->getContext())),
1591 SE->getIntegerSCEV(0,
1592 Type::getInt32Ty(Preheader->getContext())),
1593 0);
1595 // Choose a strength-reduction strategy and prepare for it by creating
1596 // the necessary PHIs and adjusting the bookkeeping.
1597 if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1598 AllUsesAreAddresses, Stride)) {
1599 PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
1600 PreheaderRewriter);
1601 } else {
1602 // Emit the initial base value into the loop preheader.
1603 CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
1604 PreInsertPt);
1606 // If all uses are addresses, check if it is possible to reuse an IV. The
1607 // new IV must have a stride that is a multiple of the old stride; the
1608 // multiple must be a number that can be encoded in the scale field of the
1609 // target addressing mode; and we must have a valid instruction after this
1610 // substitution, including the immediate field, if any.
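// Editorial example of the reuse checked for here (names hypothetical): if
// the loop already maintains an IV advancing by 1 and this stride is 4, the
// stride-4 addresses can be formed as [base + iv*4] on targets whose
// addressing mode has a scale field; RewriteFactor would then be 4 and no
// second PHI is needed.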
1611 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1612 AllUsesAreOutsideLoop,
1613 Stride, ReuseIV, ReplacedTy,
1614 UsersToProcess);
1615 if (!RewriteFactor->isZero())
1616 PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1617 ReuseIV, PreInsertPt);
1618 else {
1619 IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
1620 PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1621 CommonBaseV, IVIncInsertPt,
1622 L, PreheaderRewriter);
1623 }
1624 }
1626 // Process all the users now, replacing their strided uses with
1627 // strength-reduced forms. This outer loop handles all bases, the inner
1628 // loop handles all users of a particular base.
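// (Editorial note: UsersToProcess was sorted by base above, so popping from
// the back yields a run of users sharing one base; e.g. all users based on
// B + 12 are rewritten before moving on to the users based on A.)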
1629 while (!UsersToProcess.empty()) {
1630 const SCEV *Base = UsersToProcess.back().Base;
1631 Instruction *Inst = UsersToProcess.back().Inst;
1633 // Emit the code for Base into the preheader.
1634 Value *BaseV = 0;
1635 if (!Base->isZero()) {
1636 BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
1638 DEBUG(errs() << " INSERTING code for BASE = " << *Base << ":");
1639 if (BaseV->hasName())
1640 DEBUG(errs() << " Result value name = %" << BaseV->getName());
1641 DEBUG(errs() << "\n");
1643 // If BaseV is a non-zero constant, make sure that it gets inserted into
1644 // the preheader, instead of being forward substituted into the uses. We
1645 // do this by forcing a BitCast (noop cast) to be inserted into the
1646 // preheader in this case.
1647 if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
1648 isa<Constant>(BaseV)) {
1649 // We want this constant emitted into the preheader! This is just
1650 // using cast as a copy so BitCast (no-op cast) is appropriate
1651 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1652 PreInsertPt);
1653 }
1654 }
1656 // Emit the code to add the immediate offset to the Phi value, just before
1657 // the instructions that we identified as using this stride and base.
1658 do {
1659 // FIXME: Use emitted users to emit other users.
1660 BasedUser &User = UsersToProcess.back();
1662 DEBUG(errs() << " Examining ");
1663 if (User.isUseOfPostIncrementedValue)
1664 DEBUG(errs() << "postinc");
1665 else
1666 DEBUG(errs() << "preinc");
1667 DEBUG(errs() << " use ");
1668 DEBUG(WriteAsOperand(errs(), UsersToProcess.back().OperandValToReplace,
1669 /*PrintType=*/false));
1670 DEBUG(errs() << " in Inst: " << *User.Inst);
1672 // If this instruction wants to use the post-incremented value, move it
1673 // after the post-inc and use its value instead of the PHI.
1674 Value *RewriteOp = User.Phi;
1675 if (User.isUseOfPostIncrementedValue) {
1676 RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
1677 // If this user is in the loop, make sure it is the last thing in the
1678 // loop to ensure it is dominated by the increment. In case it's the
1679 // only use of the iv, the increment instruction is already before the
1680 // use.
1681 if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
1682 User.Inst->moveBefore(IVIncInsertPt);
1683 }
1685 const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);
1687 if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
1688 SE->getEffectiveSCEVType(ReplacedTy)) {
1689 assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
1690 SE->getTypeSizeInBits(ReplacedTy) &&
1691 "Unexpected widening cast!");
1692 RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
1693 }
1695 // If we had to insert new instructions for RewriteOp, we have to
1696 // consider that they may not have been able to end up immediately
1697 // next to RewriteOp, because non-PHI instructions may never precede
1698 // PHI instructions in a block. In this case, remember where the last
1699 // instruction was inserted so that if we're replacing a different
1700 // PHI node, we can use the later point to expand the final
1701 // RewriteExpr.
1702 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1703 if (RewriteOp == User.Phi) NewBasePt = 0;
1705 // Clear the SCEVExpander's expression map so that we are guaranteed
1706 // to have the code emitted where we expect it.
1707 Rewriter.clear();
1709 // If we are reusing the iv, then it must be multiplied by a constant
1710 // factor to take advantage of the addressing mode scale component.
1711 if (!RewriteFactor->isZero()) {
1712 // If we're reusing an IV with a nonzero base (currently this happens
1713 // only when all reuses are outside the loop) subtract that base here.
1714 // The base has been used to initialize the PHI node but we don't want
1715 // it here.
1716 if (!ReuseIV.Base->isZero()) {
1717 const SCEV *typedBase = ReuseIV.Base;
1718 if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
1719 SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
1720 // It's possible the original IV is a larger type than the new IV,
1721 // in which case we have to truncate the Base. We checked in
1722 // RequiresTypeConversion that this is valid.
1723 assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
1724 SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
1725 "Unexpected lengthening conversion!");
1726 typedBase = SE->getTruncateExpr(ReuseIV.Base,
1727 RewriteExpr->getType());
1729 RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
1732 // Multiply old variable, with base removed, by new scale factor.
1733 RewriteExpr = SE->getMulExpr(RewriteFactor,
1734 RewriteExpr);
1736 // The common base is emitted in the loop preheader. But since we
1737 // are reusing an IV, it has not been used to initialize the PHI node.
1738 // Add it to the expression used to rewrite the uses.
1739 // When this use is outside the loop, we earlier subtracted the
1740 // common base, and are adding it back here. Use the same expression
1741 // as before, rather than CommonBaseV, so DAGCombiner will zap it.
1742 if (!CommonExprs->isZero()) {
1743 if (L->contains(User.Inst->getParent()))
1744 RewriteExpr = SE->getAddExpr(RewriteExpr,
1745 SE->getUnknown(CommonBaseV));
1746 else
1747 RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
1748 }
1749 }
1751 // Now that we know what we need to do, insert code before User for the
1752 // immediate and any loop-variant expressions.
1753 if (BaseV)
1754 // Add BaseV to the PHI value if needed.
1755 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
1757 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1758 Rewriter, L, this,
1759 DeadInsts);
1761 // Mark old value we replaced as possibly dead, so that it is eliminated
1762 // if we just replaced the last use of that value.
1763 DeadInsts.push_back(User.OperandValToReplace);
1765 UsersToProcess.pop_back();
1766 ++NumReduced;
1768 // If there are any more users to process with the same base, process them
1769 // now. We sorted by base above, so we just have to check the last elt.
1770 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1771 // TODO: Next, find out which base index is the most common, pull it out.
1772 }
1774 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1775 // different starting values, into different PHIs.
1776 }
1778 void LoopStrengthReduce::StrengthReduceIVUsers(Loop *L) {
1779 // Note: this processes each stride/type pair individually. All users
1780 // passed into StrengthReduceIVUsersOfStride have the same type AND stride.
1781 // Also, note that we iterate over IVUsesByStride indirectly by using
1782 // StrideOrder. This extra layer of indirection makes the ordering of
1783 // strides deterministic - not dependent on map order.
1784 for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; ++Stride) {
1785 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1786 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1787 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1788 // FIXME: Generalize to non-affine IV's.
1789 if (!SI->first->isLoopInvariant(L))
1790 continue;
1791 StrengthReduceIVUsersOfStride(SI->first, *SI->second, L);
1792 }
1793 }
1795 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1796 /// set the IV user and stride information and return true, otherwise return
1797 /// false.
1798 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond,
1799 IVStrideUse *&CondUse,
1800 const SCEV* &CondStride) {
1801 for (unsigned Stride = 0, e = IU->StrideOrder.size();
1802 Stride != e && !CondUse; ++Stride) {
1803 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1804 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1805 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1807 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1808 E = SI->second->Users.end(); UI != E; ++UI)
1809 if (UI->getUser() == Cond) {
1810 // NOTE: we could handle setcc instructions with multiple uses here,
1811 // but InstCombine does it as well for simple uses, and it's not clear
1812 // that it occurs often enough in real life to be worth handling.
1813 CondUse = &*UI;
1814 CondStride = SI->first;
1815 return true;
1816 }
1817 }
1818 return false;
1819 }
1821 namespace {
1822 // Constant strides come first, sorted by their absolute values. If the
1823 // absolute values are the same, then positive strides come first.
1824 // e.g.
1825 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
1826 struct StrideCompare {
1827 const ScalarEvolution *SE;
1828 explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
1830 bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) {
1831 const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
1832 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
1833 if (LHSC && RHSC) {
1834 int64_t LV = LHSC->getValue()->getSExtValue();
1835 int64_t RV = RHSC->getValue()->getSExtValue();
1836 uint64_t ALV = (LV < 0) ? -LV : LV;
1837 uint64_t ARV = (RV < 0) ? -RV : RV;
1838 if (ALV == ARV) {
1839 if (LV != RV)
1840 return LV > RV;
1841 } else {
1842 return ALV < ARV;
1843 }
1845 // If it's the same value but different type, sort by bit width so
1846 // that we emit larger induction variables before smaller
1847 // ones, letting the smaller be re-written in terms of larger ones.
1848 return SE->getTypeSizeInBits(RHS->getType()) <
1849 SE->getTypeSizeInBits(LHS->getType());
1851 return LHSC && !RHSC;
1856 /// ChangeCompareStride - If a loop termination compare instruction is the
1857 /// only use of its stride, and the comparison is against a constant value,
1858 /// try to eliminate the stride by moving the compare instruction to another
1859 /// stride and change its constant operand accordingly. e.g.
1860 ///
1861 /// loop:
1862 /// ...
1863 /// v1 = v1 + 3
1864 /// v2 = v2 + 1
1865 /// if (v2 < 10) goto loop
1866 /// =>
1867 /// loop:
1868 /// ...
1869 /// v1 = v1 + 3
1870 /// if (v1 < 30) goto loop
1871 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
1872 IVStrideUse* &CondUse,
1873 const SCEV* &CondStride,
1874 bool PostPass) {
1875 // If there's only one stride in the loop, there's nothing to do here.
1876 if (IU->StrideOrder.size() < 2)
1877 return Cond;
1878 // If there are other users of the condition's stride, don't bother
1879 // trying to change the condition because the stride will still
1880 // remain.
1881 std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
1882 IU->IVUsesByStride.find(CondStride);
1883 if (I == IU->IVUsesByStride.end())
1884 return Cond;
1885 if (I->second->Users.size() > 1) {
1886 for (ilist<IVStrideUse>::iterator II = I->second->Users.begin(),
1887 EE = I->second->Users.end(); II != EE; ++II) {
1888 if (II->getUser() == Cond)
1889 continue;
1890 if (!isInstructionTriviallyDead(II->getUser()))
1891 return Cond;
1892 }
1893 }
1894 // Only handle constant strides for now.
1895 const SCEVConstant *SC = dyn_cast<SCEVConstant>(CondStride);
1896 if (!SC) return Cond;
1898 ICmpInst::Predicate Predicate = Cond->getPredicate();
1899 int64_t CmpSSInt = SC->getValue()->getSExtValue();
1900 unsigned BitWidth = SE->getTypeSizeInBits(CondStride->getType());
1901 uint64_t SignBit = 1ULL << (BitWidth-1);
1902 const Type *CmpTy = Cond->getOperand(0)->getType();
1903 const Type *NewCmpTy = NULL;
1904 unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
1905 unsigned NewTyBits = 0;
1906 const SCEV *NewStride = NULL;
1907 Value *NewCmpLHS = NULL;
1908 Value *NewCmpRHS = NULL;
1909 int64_t Scale = 1;
1910 const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);
1912 if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
1913 int64_t CmpVal = C->getValue().getSExtValue();
1915 // Check the relevant induction variable for conformance to
1916 // the pattern.
1917 const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
1918 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
1919 if (!AR || !AR->isAffine())
1920 return Cond;
1922 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
1923 // Check stride constant and the comparison constant signs to detect
1924 // impossible loops.
1925 if (StartC) {
1926 if ((StartC->getValue()->getSExtValue() < CmpVal && CmpSSInt < 0) ||
1927 (StartC->getValue()->getSExtValue() > CmpVal && CmpSSInt > 0))
1928 return Cond;
1929 } else {
1930 // More restrictive check for the other cases.
1931 if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
1932 return Cond;
1933 }
1935 // Look for a suitable stride / iv as replacement.
1936 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
1937 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1938 IU->IVUsesByStride.find(IU->StrideOrder[i]);
1939 if (!isa<SCEVConstant>(SI->first) || SI->second->Users.empty())
1940 continue;
1941 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1942 if (SSInt == CmpSSInt ||
1943 abs64(SSInt) < abs64(CmpSSInt) ||
1944 (SSInt % CmpSSInt) != 0)
1945 continue;
1947 Scale = SSInt / CmpSSInt;
1948 int64_t NewCmpVal = CmpVal * Scale;
1950 // If the old icmp value fits in the icmp immediate field but the new one
1951 // doesn't, try something else.
1952 if (TLI &&
1953 TLI->isLegalICmpImmediate(CmpVal) &&
1954 !TLI->isLegalICmpImmediate(NewCmpVal))
1955 continue;
1957 APInt Mul = APInt(BitWidth*2, CmpVal, true);
1958 Mul = Mul * APInt(BitWidth*2, Scale, true);
1959 // Check for overflow.
1960 if (!Mul.isSignedIntN(BitWidth))
1961 continue;
1962 // Check for overflow in the stride's type too.
1963 if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
1964 continue;
1966 // Watch out for overflow.
1967 if (ICmpInst::isSigned(Predicate) &&
1968 (CmpVal & SignBit) != (NewCmpVal & SignBit))
1969 continue;
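// Editorial worked example (not from the original source): with
// BitWidth = 8, CmpVal = 100 and Scale = 2, NewCmpVal = 200 does not fit in
// a signed i8 (max 127); the double-width APInt multiply above detects
// exactly this and the candidate stride is skipped.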
1971 // Pick the best iv to use trying to avoid a cast.
1972 NewCmpLHS = NULL;
1973 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1974 E = SI->second->Users.end(); UI != E; ++UI) {
1975 Value *Op = UI->getOperandValToReplace();
1977 // If the IVStrideUse implies a cast, check for an actual cast which
1978 // can be used to find the original IV expression.
1979 if (SE->getEffectiveSCEVType(Op->getType()) !=
1980 SE->getEffectiveSCEVType(SI->first->getType())) {
1981 CastInst *CI = dyn_cast<CastInst>(Op);
1982 // If it's not a simple cast, it's complicated.
1983 if (!CI)
1984 continue;
1985 // If it's a cast from a type other than the stride type,
1986 // it's complicated.
1987 if (CI->getOperand(0)->getType() != SI->first->getType())
1988 continue;
1989 // Ok, we found the IV expression in the stride's type.
1990 Op = CI->getOperand(0);
1991 }
1993 NewCmpLHS = Op;
1994 if (NewCmpLHS->getType() == CmpTy)
1995 break;
1996 }
1997 if (!NewCmpLHS)
1998 continue;
2000 NewCmpTy = NewCmpLHS->getType();
2001 NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
2002 const Type *NewCmpIntTy = IntegerType::get(Cond->getContext(), NewTyBits);
2003 if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
2004 // Check if it is possible to rewrite it using
2005 // an iv / stride of a smaller integer type.
2006 unsigned Bits = NewTyBits;
2007 if (ICmpInst::isSigned(Predicate))
2008 --Bits;
2009 uint64_t Mask = (1ULL << Bits) - 1;
2010 if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
2011 continue;
2012 }
2014 // Don't rewrite if use offset is non-constant and the new type is
2015 // of a different type.
2016 // FIXME: too conservative?
2017 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
2018 continue;
2020 if (!PostPass) {
2021 bool AllUsesAreAddresses = true;
2022 bool AllUsesAreOutsideLoop = true;
2023 std::vector<BasedUser> UsersToProcess;
2024 const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
2025 AllUsesAreAddresses,
2026 AllUsesAreOutsideLoop,
2027 UsersToProcess);
2028 // Avoid rewriting the compare instruction with an iv of new stride
2029 // if it's likely the new stride uses will be rewritten using the
2030 // stride of the compare instruction.
2031 if (AllUsesAreAddresses &&
2032 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
2033 continue;
2034 }
2036 // Avoid rewriting the compare instruction with an iv which has
2037 // implicit extension or truncation built into it.
2038 // TODO: This is over-conservative.
2039 if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
2040 continue;
2042 // If scale is negative, use swapped predicate unless it's testing
2043 // for equality.
2044 if (Scale < 0 && !Cond->isEquality())
2045 Predicate = ICmpInst::getSwappedPredicate(Predicate);
2047 NewStride = IU->StrideOrder[i];
2048 if (!isa<PointerType>(NewCmpTy))
2049 NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
2050 else {
2051 Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
2052 NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
2053 }
2054 NewOffset = TyBits == NewTyBits
2055 ? SE->getMulExpr(CondUse->getOffset(),
2056 SE->getConstant(CmpTy, Scale))
2057 : SE->getConstant(NewCmpIntTy,
2058 cast<SCEVConstant>(CondUse->getOffset())->getValue()
2059 ->getSExtValue()*Scale);
2061 break;
2062 }
2063 }
2064 // Forgo this transformation if the increment happens to be
2065 // unfortunately positioned after the condition, and the condition
2066 // has multiple uses which prevent it from being moved immediately
2067 // before the branch. See
2068 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2069 // for an example of this situation.
2070 if (!Cond->hasOneUse()) {
2071 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2072 I != E; ++I)
2073 if (I == NewCmpLHS)
2074 return Cond;
2075 }
2077 if (NewCmpRHS) {
2078 // Create a new compare instruction using new stride / iv.
2079 ICmpInst *OldCond = Cond;
2080 // Insert new compare instruction.
2081 Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
2082 L->getHeader()->getName() + ".termcond");
2084 DEBUG(errs() << " Change compare stride in Inst " << *OldCond);
2085 DEBUG(errs() << " to " << *Cond << '\n');
2087 // Remove the old compare instruction. The old indvar is probably dead too.
2088 DeadInsts.push_back(CondUse->getOperandValToReplace());
2089 OldCond->replaceAllUsesWith(Cond);
2090 OldCond->eraseFromParent();
2092 IU->IVUsesByStride[NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
2093 CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
2094 CondStride = NewStride;
2095 ++NumEliminated;
2096 Changed = true;
2097 }
2099 return Cond;
2100 }
2102 /// OptimizeMax - Rewrite the loop's terminating condition if it uses
2103 /// a max computation.
2105 /// This is a narrow solution to a specific, but acute, problem. For loops
2106 /// like this:
2107 ///
2108 /// i = 0;
2109 /// do {
2110 /// p[i] = 0.0;
2111 /// } while (++i < n);
2113 /// the trip count isn't just 'n', because 'n' might not be positive. And
2114 /// unfortunately this can come up even for loops where the user didn't use
2115 /// a C do-while loop. For example, seemingly well-behaved top-test loops
2116 /// will commonly be lowered like this:
2117 ///
2118 /// if (n > 0) {
2119 /// i = 0;
2120 /// do {
2121 /// p[i] = 0.0;
2122 /// } while (++i < n);
2123 /// }
2125 /// and then it's possible for subsequent optimization to obscure the if
2126 /// test in such a way that indvars can't find it.
2128 /// When indvars can't find the if test in loops like this, it creates a
2129 /// max expression, which allows it to give the loop a canonical
2130 /// induction variable:
2131 ///
2132 /// i = 0;
2133 /// max = n < 1 ? 1 : n;
2134 /// do {
2135 /// p[i] = 0.0;
2136 /// } while (++i != max);
2138 /// Canonical induction variables are necessary because the loop passes
2139 /// are designed around them. The most obvious example of this is the
2140 /// LoopInfo analysis, which doesn't remember trip count values. It
2141 /// expects to be able to rediscover the trip count each time it is
2142 /// needed, and it does this using a simple analysis that only succeeds if
2143 /// the loop has a canonical induction variable.
2145 /// However, when it comes time to generate code, the maximum operation
2146 /// can be quite costly, especially if it's inside of an outer loop.
2148 /// This function solves this problem by detecting this type of loop and
2149 /// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
2150 /// the instructions for the maximum computation.
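///
/// Editorial sketch of the end result (not from the original source): after
/// the rewrite, the loop above ends with
///
/// } while (++i < n);
///
/// and the "max = n < 1 ? 1 : n" select (plus its compare, if otherwise
/// unused) is deleted.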
2152 ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
2153 IVStrideUse* &CondUse) {
2154 // Check that the loop matches the pattern we're looking for.
2155 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2156 Cond->getPredicate() != CmpInst::ICMP_NE)
2157 return Cond;
2159 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2160 if (!Sel || !Sel->hasOneUse()) return Cond;
2162 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2163 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2164 return Cond;
2165 const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2167 // Add one to the backedge-taken count to get the trip count.
2168 const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
2170 // Check for a max calculation that matches the pattern.
2171 if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
2172 return Cond;
2173 const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
2174 if (Max != SE->getSCEV(Sel)) return Cond;
2176 // To handle a max with more than two operands, this optimization would
2177 // require additional checking and setup.
2178 if (Max->getNumOperands() != 2)
2179 return Cond;
2181 const SCEV *MaxLHS = Max->getOperand(0);
2182 const SCEV *MaxRHS = Max->getOperand(1);
2183 if (!MaxLHS || MaxLHS != One) return Cond;
2185 // Check the relevant induction variable for conformance to
2186 // the pattern.
2187 const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2188 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2189 if (!AR || !AR->isAffine() ||
2190 AR->getStart() != One ||
2191 AR->getStepRecurrence(*SE) != One)
2192 return Cond;
2194 assert(AR->getLoop() == L &&
2195 "Loop condition operand is an addrec in a different loop!");
2197 // Check the right operand of the select, and remember it, as it will
2198 // be used in the new comparison instruction.
2199 Value *NewRHS = 0;
2200 if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
2201 NewRHS = Sel->getOperand(1);
2202 else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
2203 NewRHS = Sel->getOperand(2);
2204 if (!NewRHS) return Cond;
2206 // Determine the new comparison opcode. It may be signed or unsigned,
2207 // and the original comparison may be either equality or inequality.
2208 CmpInst::Predicate Pred =
2209 isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
2210 if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2211 Pred = CmpInst::getInversePredicate(Pred);
2213 // Ok, everything looks ok to change the condition into an SLT or SGE and
2214 // delete the max calculation.
2215 ICmpInst *NewCond =
2216 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2218 // Delete the max calculation instructions.
2219 Cond->replaceAllUsesWith(NewCond);
2220 CondUse->setUser(NewCond);
2221 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2222 Cond->eraseFromParent();
2223 Sel->eraseFromParent();
2224 if (Cmp->use_empty())
2225 Cmp->eraseFromParent();
2227 return NewCond;
2228 }
2229 /// OptimizeShadowIV - If IV is used in an int-to-float cast
2230 /// inside the loop then try to eliminate the cast operation.
2231 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2233 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2234 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2235 return;
2237 for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
2238 ++Stride) {
2239 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2240 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2241 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2242 if (!isa<SCEVConstant>(SI->first))
2243 continue;
2245 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
2246 E = SI->second->Users.end(); UI != E; /* empty */) {
2247 ilist<IVStrideUse>::iterator CandidateUI = UI;
2248 ++UI;
2249 Instruction *ShadowUse = CandidateUI->getUser();
2250 const Type *DestTy = NULL;
2252 /* If shadow use is an int->float cast then insert a second IV
2253 to eliminate this cast.
2255 for (unsigned i = 0; i < n; ++i)
2256 foo((double)i);
2258 is transformed into
2260 double d = 0.0;
2261 for (unsigned i = 0; i < n; ++i, ++d)
2262 foo(d);
2263 */
2264 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
2265 DestTy = UCast->getDestTy();
2266 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
2267 DestTy = SCast->getDestTy();
2268 if (!DestTy) continue;
2270 if (TLI) {
2271 // If target does not support DestTy natively then do not apply
2272 // this transformation.
2273 EVT DVT = TLI->getValueType(DestTy);
2274 if (!TLI->isTypeLegal(DVT)) continue;
2275 }
2277 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2278 if (!PH) continue;
2279 if (PH->getNumIncomingValues() != 2) continue;
2281 const Type *SrcTy = PH->getType();
2282 int Mantissa = DestTy->getFPMantissaWidth();
2283 if (Mantissa == -1) continue;
2284 if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
2287 unsigned Entry, Latch;
2288 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2289 Entry = 0;
2290 Latch = 1;
2291 } else {
2292 Entry = 1;
2293 Latch = 0;
2294 }
2296 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2297 if (!Init) continue;
2298 Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2300 BinaryOperator *Incr =
2301 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2302 if (!Incr) continue;
2303 if (Incr->getOpcode() != Instruction::Add
2304 && Incr->getOpcode() != Instruction::Sub)
2305 continue;
2307 /* Initialize new IV, double d = 0.0 in above example. */
2308 ConstantInt *C = NULL;
2309 if (Incr->getOperand(0) == PH)
2310 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2311 else if (Incr->getOperand(1) == PH)
2312 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2313 else
2314 continue;
2316 if (!C) continue;
2318 // Ignore negative constants, as the code below doesn't handle them
2319 // correctly. TODO: Remove this restriction.
2320 if (!C->getValue().isStrictlyPositive()) continue;
2322 /* Add new PHINode. */
2323 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2325 /* create new increment. '++d' in above example. */
2326 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2327 BinaryOperator *NewIncr =
2328 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2329 Instruction::FAdd : Instruction::FSub,
2330 NewPH, CFP, "IV.S.next.", Incr);
2332 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2333 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2335 /* Remove cast operation */
2336 ShadowUse->replaceAllUsesWith(NewPH);
2337 ShadowUse->eraseFromParent();
2338 NumShadow++;
2339 break;
2340 }
2341 }
2342 }
2344 /// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2345 /// uses in the loop, look to see if we can eliminate some, in favor of using
2346 /// common indvars for the different uses.
2347 void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2348 // TODO: implement optzns here.
2350 OptimizeShadowIV(L);
2351 }
2353 bool LoopStrengthReduce::StrideMightBeShared(const SCEV* Stride, Loop *L,
2354 bool CheckPreInc) {
2355 int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
2356 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2357 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2358 IU->IVUsesByStride.find(IU->StrideOrder[i]);
2359 const SCEV *Share = SI->first;
2360 if (!isa<SCEVConstant>(SI->first) || Share == Stride)
2361 continue;
2362 int64_t SSInt = cast<SCEVConstant>(Share)->getValue()->getSExtValue();
2363 if (SSInt == SInt)
2364 return true; // This can definitely be reused.
2365 if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
2366 continue;
2367 int64_t Scale = SSInt / SInt;
2368 bool AllUsesAreAddresses = true;
2369 bool AllUsesAreOutsideLoop = true;
2370 std::vector<BasedUser> UsersToProcess;
2371 const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
2372 AllUsesAreAddresses,
2373 AllUsesAreOutsideLoop,
2374 UsersToProcess);
2375 if (AllUsesAreAddresses &&
2376 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess)) {
2377 if (!CheckPreInc)
2378 return true;
2379 // Any pre-inc iv use?
2380 IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[Share];
2381 for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2382 E = StrideUses.Users.end(); I != E; ++I) {
2383 if (!I->isUseOfPostIncrementedValue())
2384 return true;
2385 }
2386 }
2387 }
2388 return false;
2389 }
2391 /// isUsedByExitBranch - Return true if icmp is used by a loop terminating
2392 /// conditional branch, or it is and'ed / or'ed with other conditions before
2393 /// being used as the condition.
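// Editorial example of an accepted and/or chain, in LLVM IR (values
// hypothetical):
//
//   %c    = icmp eq i32 %iv.next, %n
//   %cond = and i1 %c, %other        ; single use, same block as %c
//   br i1 %cond, label %exit, label %loop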
2394 static bool isUsedByExitBranch(ICmpInst *Cond, Loop *L) {
2395 BasicBlock *CondBB = Cond->getParent();
2396 if (!L->isLoopExiting(CondBB))
2397 return false;
2398 BranchInst *TermBr = dyn_cast<BranchInst>(CondBB->getTerminator());
2399 if (!TermBr || !TermBr->isConditional())
2400 return false;
2402 Value *User = *Cond->use_begin();
2403 Instruction *UserInst = dyn_cast<Instruction>(User);
2404 while (UserInst &&
2405 (UserInst->getOpcode() == Instruction::And ||
2406 UserInst->getOpcode() == Instruction::Or)) {
2407 if (!UserInst->hasOneUse() || UserInst->getParent() != CondBB)
2408 return false;
2409 User = *User->use_begin();
2410 UserInst = dyn_cast<Instruction>(User);
2411 }
2412 return User == TermBr;
2413 }
2415 static bool ShouldCountToZero(ICmpInst *Cond, IVStrideUse* &CondUse,
2416 ScalarEvolution *SE, Loop *L,
2417 const TargetLowering *TLI = 0) {
2418 if (!L->contains(Cond->getParent()))
2419 return false;
2421 if (!isa<SCEVConstant>(CondUse->getOffset()))
2422 return false;
2424 // Handle only tests for equality for the moment.
2425 if (!Cond->isEquality() || !Cond->hasOneUse())
2426 return false;
2427 if (!isUsedByExitBranch(Cond, L))
2428 return false;
2430 Value *CondOp0 = Cond->getOperand(0);
2431 const SCEV *IV = SE->getSCEV(CondOp0);
2432 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2433 if (!AR || !AR->isAffine())
2434 return false;
2436 const SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
2437 if (!SC || SC->getValue()->getSExtValue() < 0)
2438 // If it's already counting down, don't do anything.
2439 return false;
2441 // If the RHS of the comparison is not loop invariant, the rewrite
2442 // cannot be done. Also bail out if it's already comparing against zero.
2443 // If we are checking this before the cmp stride optimization, check if
2444 // it's comparing against an already-legal immediate.
2445 Value *RHS = Cond->getOperand(1);
2446 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS);
2447 if (!L->isLoopInvariant(RHS) ||
2448 (RHSC && RHSC->isZero()) ||
2449 (RHSC && TLI && TLI->isLegalICmpImmediate(RHSC->getSExtValue())))
2450 return false;
2452 // Make sure the IV is only used for counting. Value may be preinc or
2453 // postinc; 2 uses in either case.
2454 if (!CondOp0->hasNUses(2))
2455 return false;
2457 return true;
2458 }
2460 /// OptimizeLoopTermCond - Change loop terminating condition to use the
2461 /// postinc iv when possible.
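// Editorial illustration (not from the original source): rewriting
//
//   %t = icmp slt i32 %iv, %n        ; tests the pre-incremented value
//   br i1 %t, label %loop, label %exit
//
// to test %iv.next instead (with the use's offset adjusted down by one
// stride) lets %iv's live range end at the increment, so the IV and its
// increment can be coalesced into one register.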
2462 void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
2463 BasicBlock *LatchBlock = L->getLoopLatch();
2464 bool LatchExit = L->isLoopExiting(LatchBlock);
2465 SmallVector<BasicBlock*, 8> ExitingBlocks;
2466 L->getExitingBlocks(ExitingBlocks);
2468 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2469 BasicBlock *ExitingBlock = ExitingBlocks[i];
2471 // Finally, get the terminating condition for the loop if possible. If we
2472 // can, we want to change it to use a post-incremented version of its
2473 // induction variable, to allow coalescing the live ranges for the IV into
2474 // one register value.
2476 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2477 if (!TermBr)
2478 continue;
2479 // FIXME: Overly conservative, termination condition could be an 'or' etc..
2480 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2481 continue;
2483 // Search IVUsesByStride to find Cond's IVUse if there is one.
2484 IVStrideUse *CondUse = 0;
2485 const SCEV *CondStride = 0;
2486 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2487 if (!FindIVUserForCond(Cond, CondUse, CondStride))
2488 continue;
2490 // If the latch block is exiting and it's not a single block loop, it's
2491 // not safe to use postinc iv in other exiting blocks. FIXME: overly
2492 // conservative? How about icmp stride optimization?
2493 bool UsePostInc = !(e > 1 && LatchExit && ExitingBlock != LatchBlock);
2494 if (UsePostInc && ExitingBlock != LatchBlock) {
2495 if (!Cond->hasOneUse())
2496 // See below, we don't want the condition to be cloned.
2497 UsePostInc = false;
2498 else {
2499 // If exiting block is the latch block, we know it's safe and profitable
2500 // to transform the icmp to use post-inc iv. Otherwise do so only if it
2501 // would not reuse another iv and its iv would be reused by other uses.
2502 // We are optimizing for the case where the icmp is the only use of the
2503 // iv.
2504 IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[CondStride];
2505 for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2506 E = StrideUses.Users.end(); I != E; ++I) {
2507 if (I->getUser() == Cond)
2508 continue;
2509 if (!I->isUseOfPostIncrementedValue()) {
2510 UsePostInc = false;
2511 break;
2512 }
2513 }
2514 }
2516 // If the iv for this stride might be shared and any of its uses are
2517 // pre-inc uses, then it's not safe to use the post-inc iv.
2518 if (UsePostInc &&
2519 isa<SCEVConstant>(CondStride) &&
2520 StrideMightBeShared(CondStride, L, true))
2521 UsePostInc = false;
2522 }
2524 // If the trip count is computed in terms of a max (due to ScalarEvolution
2525 // being unable to find a sufficient guard, for example), change the loop
2526 // comparison to use SLT or ULT instead of NE.
2527 Cond = OptimizeMax(L, Cond, CondUse);
2529 // If possible, change stride and operands of the compare instruction to
2530 // eliminate one stride. However, avoid rewriting the compare instruction
2531 // with an iv of new stride if it's likely the new stride uses will be
2532 // rewritten using the stride of the compare instruction.
2533 if (ExitingBlock == LatchBlock && isa<SCEVConstant>(CondStride)) {
2534 // If the condition stride is a constant and it's the only use, we might
2535 // want to optimize it first by turning it to count toward zero.
2536 if (!StrideMightBeShared(CondStride, L, false) &&
2537 !ShouldCountToZero(Cond, CondUse, SE, L, TLI))
2538 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2539 }
2541 if (!UsePostInc)
2542 continue;
2544 DEBUG(errs() << " Change loop exiting icmp to use postinc iv: "
2545 << *Cond << '\n');
2547 // It's possible for the setcc instruction to be anywhere in the loop, and
2548 // possible for it to have multiple users. If it is not immediately before
2549 // the exiting block branch, move it.
2550 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2551 if (Cond->hasOneUse()) { // Condition has a single use, just move it.
2552 Cond->moveBefore(TermBr);
2553 } else {
2554 // Otherwise, clone the terminating condition and insert into the
2555 // loopend block.
2556 Cond = cast<ICmpInst>(Cond->clone());
2557 Cond->setName(L->getHeader()->getName() + ".termcond");
2558 ExitingBlock->getInstList().insert(TermBr, Cond);
2560 // Clone the IVUse, as the old use still exists!
2561 IU->IVUsesByStride[CondStride]->addUser(CondUse->getOffset(), Cond,
2562 CondUse->getOperandValToReplace());
2563 CondUse = &IU->IVUsesByStride[CondStride]->Users.back();
2564 }
2565 }
2567 // If we get to here, we know that we can transform the setcc instruction to
2568 // use the post-incremented version of the IV, allowing us to coalesce the
2569 // live ranges for the IV correctly.
2570 CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), CondStride));
2571 CondUse->setIsUseOfPostIncrementedValue(true);
2572 Changed = true;
2574 ++NumLoopCond;
2575 }
2576 }
2578 bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride,
2579 IVStrideUse* &CondUse,
2580 Loop *L) {
2581 // If the only use is an icmp of a loop exiting conditional branch, then
2582 // attempt the optimization.
2583 BasedUser User = BasedUser(*CondUse, SE);
2584 assert(isa<ICmpInst>(User.Inst) && "Expecting an ICMPInst!");
2585 ICmpInst *Cond = cast<ICmpInst>(User.Inst);
2587 // Less strict check now that compare stride optimization is done.
2588 if (!ShouldCountToZero(Cond, CondUse, SE, L))
2589 return false;
2591 Value *CondOp0 = Cond->getOperand(0);
2592 PHINode *PHIExpr = dyn_cast<PHINode>(CondOp0);
2593 Instruction *Incr;
2594 if (!PHIExpr) {
2595 // Value tested is postinc. Find the phi node.
2596 Incr = dyn_cast<BinaryOperator>(CondOp0);
2597 // FIXME: Just use User.OperandValToReplace here?
2598 if (!Incr || Incr->getOpcode() != Instruction::Add)
2599 return false;
2601 PHIExpr = dyn_cast<PHINode>(Incr->getOperand(0));
2602 if (!PHIExpr)
2603 return false;
2604 // 1 use for preinc value, the increment.
2605 if (!PHIExpr->hasOneUse())
2606 return false;
2607 } else {
2608 assert(isa<PHINode>(CondOp0) &&
2609 "Unexpected loop exiting counting instruction sequence!");
2610 PHIExpr = cast<PHINode>(CondOp0);
2611 // Value tested is preinc. Find the increment.
2612 // A CmpInst is not a BinaryOperator; we depend on this.
2613 Instruction::use_iterator UI = PHIExpr->use_begin();
2614 Incr = dyn_cast<BinaryOperator>(UI);
2615 if (!Incr)
2616 Incr = dyn_cast<BinaryOperator>(++UI);
2617 // One use for postinc value, the phi. Unnecessarily conservative?
2618 if (!Incr || !Incr->hasOneUse() || Incr->getOpcode() != Instruction::Add)
2619 return false;
2620 }
2622 // Replace the increment with a decrement.
2623 DEBUG(errs() << "LSR: Examining use ");
2624 DEBUG(WriteAsOperand(errs(), CondOp0, /*PrintType=*/false));
2625 DEBUG(errs() << " in Inst: " << *Cond << '\n');
2626 BinaryOperator *Decr = BinaryOperator::Create(Instruction::Sub,
2627 Incr->getOperand(0), Incr->getOperand(1), "tmp", Incr);
2628 Incr->replaceAllUsesWith(Decr);
2629 Incr->eraseFromParent();
2631 // Substitute endval-startval for the original startval, and 0 for the
2632 // original endval. Since we're only testing for equality this is OK even
2633 // if the computation wraps around.
2634 BasicBlock *Preheader = L->getLoopPreheader();
2635 Instruction *PreInsertPt = Preheader->getTerminator();
2636 unsigned InBlock = L->contains(PHIExpr->getIncomingBlock(0)) ? 1 : 0;
2637 Value *StartVal = PHIExpr->getIncomingValue(InBlock);
2638 Value *EndVal = Cond->getOperand(1);
2639 DEBUG(errs() << " Optimize loop counting iv to count down ["
2640 << *EndVal << " .. " << *StartVal << "]\n");
2642 // FIXME: check for case where both are constant.
2643 Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
2644 BinaryOperator *NewStartVal = BinaryOperator::Create(Instruction::Sub,
2645 EndVal, StartVal, "tmp", PreInsertPt);
2646 PHIExpr->setIncomingValue(InBlock, NewStartVal);
2647 Cond->setOperand(1, Zero);
2648 DEBUG(errs() << " New icmp: " << *Cond << "\n");
2650 int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
2651 const SCEV *NewStride = 0;
2652 bool Found = false;
2653 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2654 const SCEV *OldStride = IU->StrideOrder[i];
2655 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OldStride))
2656 if (SC->getValue()->getSExtValue() == -SInt) {
2657 Found = true;
2658 NewStride = OldStride;
2659 break;
2660 }
2661 }
2663 if (!Found)
2664 NewStride = SE->getIntegerSCEV(-SInt, Stride->getType());
2665 IU->AddUser(NewStride, CondUse->getOffset(), Cond, Cond->getOperand(0));
2666 IU->IVUsesByStride[Stride]->removeUser(CondUse);
2668 CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
2669 Stride = NewStride;
2671 ++NumCountZero;
2673 return true;
2674 }
2676 /// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
2677 /// when to exit the loop is used only for that purpose, try to rearrange things
2678 /// so it counts down to a test against zero.
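// Editorial sketch of the rewrite (names hypothetical):
//
//   for (i = start; i != end; ++i) {}     // compares against a nonzero bound
//
// becomes
//
//   for (j = end - start; j != 0; --j) {}
//
// "end - start" is computed once in the preheader, and counting down to zero
// lets targets use a subtract-and-branch-on-zero style exit test.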
2679 bool LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
2680 bool ThisChanged = false;
2681 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2682 const SCEV *Stride = IU->StrideOrder[i];
2683 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2684 IU->IVUsesByStride.find(Stride);
2685 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2686 // FIXME: Generalize to non-affine IV's.
2687 if (!SI->first->isLoopInvariant(L))
2688 continue;
2689 // If stride is a constant and it has an icmpinst use, check if we can
2690 // optimize the loop to count down.
2691 if (isa<SCEVConstant>(Stride) && SI->second->Users.size() == 1) {
2692 Instruction *User = SI->second->Users.begin()->getUser();
2693 if (!isa<ICmpInst>(User))
2694 continue;
2695 const SCEV *CondStride = Stride;
2696 IVStrideUse *Use = &*SI->second->Users.begin();
2697 if (!OptimizeLoopCountIVOfStride(CondStride, Use, L))
2698 continue;
2699 ThisChanged = true;
2701 // Now check if it's possible to reuse this iv for other stride uses.
2702 for (unsigned j = 0, ee = IU->StrideOrder.size(); j != ee; ++j) {
2703 const SCEV *SStride = IU->StrideOrder[j];
2704 if (SStride == CondStride)
2705 continue;
2706 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SII =
2707 IU->IVUsesByStride.find(SStride);
2708 assert(SII != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2709 // FIXME: Generalize to non-affine IV's.
2710 if (!SII->first->isLoopInvariant(L))
2711 continue;
2712 // FIXME: Rewrite other stride using CondStride.
2713 }
2714 }
2715 }
2717 Changed |= ThisChanged;
2718 return ThisChanged;
2719 }
2721 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2722 IU = &getAnalysis<IVUsers>();
2723 LI = &getAnalysis<LoopInfo>();
2724 SE = &getAnalysis<ScalarEvolution>();
2725 Changed = false;
2727 // If LoopSimplify form is not available, stay out of trouble.
2728 if (!L->getLoopPreheader() || !L->getLoopLatch())
2729 return false;
2731 if (!IU->IVUsesByStride.empty()) {
2732 DEBUG(errs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
2733 << "\" ";
2734 L->dump());
2736 // Sort the StrideOrder so we process larger strides first.
2737 std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
2738 StrideCompare(SE));
2740 // Optimize induction variables. Some indvar uses can be transformed to use
2741 // strides that will be needed for other purposes. A common example of this
2742 // is the exit test for the loop, which can often be rewritten to use the
2743 // computation of some other indvar to decide when to terminate the loop.
2744 OptimizeIndvars(L);
2746 // Change loop terminating condition to use the postinc iv when possible
2747 // and optimize loop terminating compare. FIXME: Move this after
2748 // StrengthReduceIVUsersOfStride?
2749 OptimizeLoopTermCond(L);
2751 // FIXME: We can shrink overlarge IV's here. e.g. if the code has
2752 // computation in i64 values and the target doesn't support i64, demote
2753 // the computation to 32-bit if safe.
2755 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
2756 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2757 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2758 // Need to be careful that IV's are all the same type. Only works for
2759 // intptr_t indvars.
2761 // IVsByStride keeps IVs for one particular loop.
2762 assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2764 StrengthReduceIVUsers(L);
2766 // After all sharing is done, see if we can adjust the loop to test against
2767 // zero instead of counting up to a maximum. This is usually faster.
2768 OptimizeLoopCountIV(L);
2769 }
2771 // We're done analyzing this loop; release all the state we built up for it.
2772 IVsByStride.clear();
2773 StrideNoReuse.clear();
2775 // Clean up after ourselves
2776 if (!DeadInsts.empty())
2777 DeleteTriviallyDeadInstructions();
2779 // At this point, it is worth checking to see if any recurrence PHIs are also
2780 // dead, so that we can remove them as well.
2781 DeleteDeadPHIs(L->getHeader());
2783 return Changed;
2784 }