1 //===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass performs strength reduction on array references inside loops that
11 // use the loop induction variable as one or more of their components. This is
12 // accomplished by creating a new Value to hold the initial value of the array
13 // access for the first iteration, and then creating a new GEP instruction in
14 // the loop to increment the value by the appropriate amount.
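//
// For illustration only (a rough sketch, not code taken from this pass): a
// loop that indexes an array with the canonical induction variable, such as
//
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %p = getelementptr i32* %A, i32 %i
//     store i32 0, i32* %p
//     %i.next = add i32 %i, 1
//
// can be rewritten so that the address itself becomes the induction variable
// and is simply advanced by the element size on every iteration:
//
//   loop:
//     %p = phi i32* [ %A, %entry ], [ %p.next, %loop ]
//     store i32 0, i32* %p
//     %p.next = getelementptr i32* %p, i32 1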
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "loop-reduce"
19 #include "llvm/Transforms/Scalar.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/IntrinsicInst.h"
23 #include "llvm/Type.h"
24 #include "llvm/DerivedTypes.h"
25 #include "llvm/Analysis/Dominators.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/LoopPass.h"
28 #include "llvm/Analysis/ScalarEvolutionExpander.h"
29 #include "llvm/Transforms/Utils/AddrModeMatcher.h"
30 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Target/TargetData.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/Support/CFG.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/Compiler.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/GetElementPtrTypeIterator.h"
40 #include "llvm/Target/TargetLowering.h"
44 STATISTIC(NumReduced , "Number of GEPs strength reduced");
45 STATISTIC(NumInserted, "Number of PHIs inserted");
46 STATISTIC(NumVariable, "Number of PHIs with variable strides");
47 STATISTIC(NumEliminated, "Number of strides eliminated");
48 STATISTIC(NumShadow, "Number of Shadow IVs optimized");
49 STATISTIC(NumImmSunk, "Number of common expr immediates sunk into uses");
51 static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
59 /// IVStrideUse - Keep track of one use of a strided induction variable, where
60 /// the stride is stored externally. The Offset member keeps track of the
61 /// offset from the IV, User is the actual user of the operand, and
62 /// 'OperandValToReplace' is the operand of the User that is the use.
63 struct VISIBILITY_HIDDEN IVStrideUse {
66 Value *OperandValToReplace;
68 // isUseOfPostIncrementedValue - True if this should use the
69 // post-incremented version of this IV, not the preincremented version.
70 // This can only be set in special cases, such as the terminating setcc
71 // instruction for a loop or uses dominated by the loop.
72 bool isUseOfPostIncrementedValue;
74 IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
75 : Offset(Offs), User(U), OperandValToReplace(O),
76 isUseOfPostIncrementedValue(false) {}
79 /// IVUsersOfOneStride - This structure keeps track of all instructions that
80 /// have an operand that is based on the trip count multiplied by some stride.
81 /// The stride for all of these users is common and kept external to this
82 /// structure.
83 struct VISIBILITY_HIDDEN IVUsersOfOneStride {
84 /// Users - Keep track of all of the users of this stride as well as the
85 /// initial value and the operand that uses the IV.
86 std::vector<IVStrideUse> Users;
88 void addUser(const SCEVHandle &Offset,Instruction *User, Value *Operand) {
89 Users.push_back(IVStrideUse(Offset, User, Operand));
93 /// IVInfo - This structure keeps track of one IV expression inserted during
94 /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
95 /// well as the PHI node and increment value created for rewrite.
96 struct VISIBILITY_HIDDEN IVExpr {
102 IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi,
104 : Stride(stride), Base(base), PHI(phi), IncV(incv) {}
107 /// IVsOfOneStride - This structure keeps track of all IV expression inserted
108 /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
109 struct VISIBILITY_HIDDEN IVsOfOneStride {
110 std::vector<IVExpr> IVs;
112 void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI,
114 IVs.push_back(IVExpr(Stride, Base, PHI, IncV));
118 class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
122 const TargetData *TD;
123 const Type *UIntPtrTy;
126 /// IVUsesByStride - Keep track of all uses of induction variables that we
127 /// are interested in. The key of the map is the stride of the access.
128 std::map<SCEVHandle, IVUsersOfOneStride> IVUsesByStride;
130 /// IVsByStride - Keep track of all IVs that have been inserted for a
131 /// particular stride.
132 std::map<SCEVHandle, IVsOfOneStride> IVsByStride;
134 /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
135 /// We use this to iterate over the IVUsesByStride collection without being
136 /// dependent on random ordering of pointers in the process.
137 SmallVector<SCEVHandle, 16> StrideOrder;
139 /// GEPlist - A list of the GEP's that have been remembered in the SCEV
140 /// data structures. SCEV does not know to update these when the operands
141 /// of the GEP are changed, which means we cannot leave them live across
142 /// loops.
143 SmallVector<GetElementPtrInst *, 16> GEPlist;
145 /// CastedPointers - As we need to cast values to uintptr_t, this keeps track
146 /// of the casted version of each value. This is accessed by
147 /// getCastedVersionOf.
148 DenseMap<Value*, Value*> CastedPointers;
150 /// DeadInsts - Keep track of instructions we may have made dead, so that
151 /// we can remove them after we are done working.
152 SmallVector<Instruction*, 16> DeadInsts;
154 /// TLI - Keep a pointer to a TargetLowering to consult for determining
155 /// transformation profitability.
156 const TargetLowering *TLI;
159 static char ID; // Pass ID, replacement for typeid
160 explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
161 LoopPass(&ID), TLI(tli) {
164 bool runOnLoop(Loop *L, LPPassManager &LPM);
166 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
167 // We split critical edges, so we change the CFG. However, we do update
168 // many analyses if they are around.
169 AU.addPreservedID(LoopSimplifyID);
170 AU.addPreserved<LoopInfo>();
171 AU.addPreserved<DominanceFrontier>();
172 AU.addPreserved<DominatorTree>();
174 AU.addRequiredID(LoopSimplifyID);
175 AU.addRequired<LoopInfo>();
176 AU.addRequired<DominatorTree>();
177 AU.addRequired<TargetData>();
178 AU.addRequired<ScalarEvolution>();
179 AU.addPreserved<ScalarEvolution>();
182 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
184 Value *getCastedVersionOf(Instruction::CastOps opcode, Value *V);
186 bool AddUsersIfInteresting(Instruction *I, Loop *L,
187 SmallPtrSet<Instruction*,16> &Processed);
188 SCEVHandle GetExpressionSCEV(Instruction *E);
189 ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
190 IVStrideUse* &CondUse,
191 const SCEVHandle* &CondStride);
192 void OptimizeIndvars(Loop *L);
194 /// OptimizeShadowIV - If IV is used in an int-to-float cast
195 /// inside the loop then try to eliminate the cast operation.
196 void OptimizeShadowIV(Loop *L);
198 /// OptimizeSMax - Rewrite the loop's terminating condition
199 /// if it uses an smax computation.
200 ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
201 IVStrideUse* &CondUse);
203 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
204 const SCEVHandle *&CondStride);
205 bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
206 SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
207 IVExpr&, const Type*,
208 const std::vector<BasedUser>& UsersToProcess);
209 bool ValidStride(bool, int64_t,
210 const std::vector<BasedUser>& UsersToProcess);
211 SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
212 IVUsersOfOneStride &Uses,
214 bool &AllUsesAreAddresses,
215 bool &AllUsesAreOutsideLoop,
216 std::vector<BasedUser> &UsersToProcess);
217 bool ShouldUseFullStrengthReductionMode(
218 const std::vector<BasedUser> &UsersToProcess,
220 bool AllUsesAreAddresses,
222 void PrepareToStrengthReduceFully(
223 std::vector<BasedUser> &UsersToProcess,
225 SCEVHandle CommonExprs,
227 SCEVExpander &PreheaderRewriter);
228 void PrepareToStrengthReduceFromSmallerStride(
229 std::vector<BasedUser> &UsersToProcess,
231 const IVExpr &ReuseIV,
232 Instruction *PreInsertPt);
233 void PrepareToStrengthReduceWithNewPhi(
234 std::vector<BasedUser> &UsersToProcess,
236 SCEVHandle CommonExprs,
239 SCEVExpander &PreheaderRewriter);
240 void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
241 IVUsersOfOneStride &Uses,
242 Loop *L, bool isOnlyStride);
243 void DeleteTriviallyDeadInstructions();
247 char LoopStrengthReduce::ID = 0;
248 static RegisterPass<LoopStrengthReduce>
249 X("loop-reduce", "Loop Strength Reduction");
251 Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
252 return new LoopStrengthReduce(TLI);
255 /// getCastedVersionOf - Return the specified value casted to uintptr_t. This
256 /// assumes that the Value* V is of integer or pointer type only.
258 Value *LoopStrengthReduce::getCastedVersionOf(Instruction::CastOps opcode,
260 if (V->getType() == UIntPtrTy) return V;
261 if (Constant *CB = dyn_cast<Constant>(V))
262 return ConstantExpr::getCast(opcode, CB, UIntPtrTy);
264 Value *&New = CastedPointers[V];
267 New = SCEVExpander::InsertCastOfTo(opcode, V, UIntPtrTy);
268 DeadInsts.push_back(cast<Instruction>(New));
273 /// DeleteTriviallyDeadInstructions - If any of the instructions in the
274 /// specified set are trivially dead, delete them and see if this makes any of
275 /// their operands subsequently dead.
276 void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
277 if (DeadInsts.empty()) return;
279 // Sort the deadinsts list so that we can trivially eliminate duplicates as we
280 // go. The code below never adds a non-dead instruction to the worklist, but
281 // callers may not be so careful.
282 array_pod_sort(DeadInsts.begin(), DeadInsts.end());
284 // Drop duplicate instructions and those with uses.
285 for (unsigned i = 0, e = DeadInsts.size()-1; i < e; ++i) {
286 Instruction *I = DeadInsts[i];
287 if (!I->use_empty()) DeadInsts[i] = 0;
288 while (i != e && DeadInsts[i+1] == I)
292 while (!DeadInsts.empty()) {
293 Instruction *I = DeadInsts.back();
294 DeadInsts.pop_back();
296 if (I == 0 || !isInstructionTriviallyDead(I))
299 SE->deleteValueFromRecords(I);
301 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
302 if (Instruction *U = dyn_cast<Instruction>(*OI)) {
305 DeadInsts.push_back(U);
309 I->eraseFromParent();
315 /// GetExpressionSCEV - Compute and return the SCEV for the specified
316 /// instruction.
317 SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
318 // Pointer to pointer bitcast instructions return the same value as their
319 // operands.
320 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Exp)) {
321 if (SE->hasSCEV(BCI) || !isa<Instruction>(BCI->getOperand(0)))
322 return SE->getSCEV(BCI);
323 SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)));
328 // Scalar Evolution doesn't know how to compute SCEVs for GEP instructions.
329 // If this is a GEP that SE doesn't know about, compute it now and insert it.
330 // If this is not a GEP, or if we have already done this computation, just let
331 // SE compute it.
332 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Exp);
333 if (!GEP || SE->hasSCEV(GEP))
334 return SE->getSCEV(Exp);
336 // Analyze all of the subscripts of this getelementptr instruction, looking
337 // for uses that are determined by the trip count of the loop. First, skip
338 // all operands that are not dependent on the IV.
340 // Build up the base expression. Insert an LLVM cast of the pointer to
341 // uintptr_t.
342 SCEVHandle GEPVal = SE->getUnknown(
343 getCastedVersionOf(Instruction::PtrToInt, GEP->getOperand(0)));
345 gep_type_iterator GTI = gep_type_begin(GEP);
347 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
348 i != e; ++i, ++GTI) {
349 // If this is a use of a recurrence that we can analyze, and it comes before
350 // Op does in the GEP operand list, we will handle this when we process this
352 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
353 const StructLayout *SL = TD->getStructLayout(STy);
354 unsigned Idx = cast<ConstantInt>(*i)->getZExtValue();
355 uint64_t Offset = SL->getElementOffset(Idx);
356 GEPVal = SE->getAddExpr(GEPVal,
357 SE->getIntegerSCEV(Offset, UIntPtrTy));
359 unsigned GEPOpiBits =
360 (*i)->getType()->getPrimitiveSizeInBits();
361 unsigned IntPtrBits = UIntPtrTy->getPrimitiveSizeInBits();
362 Instruction::CastOps opcode = (GEPOpiBits < IntPtrBits ?
363 Instruction::SExt : (GEPOpiBits > IntPtrBits ? Instruction::Trunc :
364 Instruction::BitCast));
365 Value *OpVal = getCastedVersionOf(opcode, *i);
366 SCEVHandle Idx = SE->getSCEV(OpVal);
368 uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
370 Idx = SE->getMulExpr(Idx,
371 SE->getConstant(ConstantInt::get(UIntPtrTy,
372 TypeSize)));
373 GEPVal = SE->getAddExpr(GEPVal, Idx);
377 SE->setSCEV(GEP, GEPVal);
378 GEPlist.push_back(GEP);
382 /// containsAddRecFromDifferentLoop - Determine whether expression S involves a
383 /// subexpression that is an AddRec from a loop other than L. An outer loop
384 /// of L is OK, but not an inner loop nor a disjoint loop.
385 static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) {
386 // This is very common, put it first.
387 if (isa<SCEVConstant>(S))
389 if (SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
390 for (unsigned int i=0; i< AE->getNumOperands(); i++)
391 if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
395 if (SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
396 if (const Loop *newLoop = AE->getLoop()) {
399 // if newLoop is an outer loop of L, this is OK.
400 if (!LoopInfoBase<BasicBlock>::isNotAlreadyContainedIn(L, newLoop))
405 if (SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
406 return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
407 containsAddRecFromDifferentLoop(DE->getRHS(), L);
409 // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
410 // need this when it is.
411 if (SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
412 return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
413 containsAddRecFromDifferentLoop(DE->getRHS(), L);
415 if (SCEVTruncateExpr *TE = dyn_cast<SCEVTruncateExpr>(S))
416 return containsAddRecFromDifferentLoop(TE->getOperand(), L);
417 if (SCEVZeroExtendExpr *ZE = dyn_cast<SCEVZeroExtendExpr>(S))
418 return containsAddRecFromDifferentLoop(ZE->getOperand(), L);
419 if (SCEVSignExtendExpr *SE = dyn_cast<SCEVSignExtendExpr>(S))
420 return containsAddRecFromDifferentLoop(SE->getOperand(), L);
424 /// getSCEVStartAndStride - Compute the start and stride of this expression,
425 /// returning false if the expression is not a start/stride pair, or true if it
426 /// is. The stride must be a loop invariant expression, but the start may be
427 /// a mix of loop invariant and loop variant expressions. The start cannot,
428 /// however, contain an AddRec from a different loop, unless that loop is an
429 /// outer loop of the current loop.
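///
/// A rough example (illustrative, not taken from this file): given the SCEV
/// {%base,+,4}<L> + %offset, where %offset is invariant in L, this returns
/// Start = %base + %offset and Stride = 4. Expressions with no AddRec for L,
/// or with a non-affine AddRec, are rejected.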
430 static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
431 SCEVHandle &Start, SCEVHandle &Stride,
432 ScalarEvolution *SE, DominatorTree *DT) {
433 SCEVHandle TheAddRec = Start; // Initialize to zero.
435 // If the outer level is an AddExpr, the operands are all start values except
436 // for a nested AddRecExpr.
437 if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
438 for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
439 if (SCEVAddRecExpr *AddRec =
440 dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
441 if (AddRec->getLoop() == L)
442 TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
444 return false; // Nested IV of some sort?
446 Start = SE->getAddExpr(Start, AE->getOperand(i));
449 } else if (isa<SCEVAddRecExpr>(SH)) {
450 TheAddRec = SH;
451 } else {
452 return false; // not analyzable.
455 SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
456 if (!AddRec || AddRec->getLoop() != L) return false;
458 // FIXME: Generalize to non-affine IV's.
459 if (!AddRec->isAffine()) return false;
461 // If Start contains an SCEVAddRecExpr from a different loop, other than an
462 // outer loop of the current loop, reject it. SCEV has no concept of
463 // operating on one loop at a time so don't confuse it with such expressions.
464 if (containsAddRecFromDifferentLoop(AddRec->getOperand(0), L))
467 Start = SE->getAddExpr(Start, AddRec->getOperand(0));
469 if (!isa<SCEVConstant>(AddRec->getOperand(1))) {
470 // If stride is an instruction, make sure it dominates the loop preheader.
471 // Otherwise we could end up with a use before def situation.
472 BasicBlock *Preheader = L->getLoopPreheader();
473 if (!AddRec->getOperand(1)->dominates(Preheader, DT))
476 DOUT << "[" << L->getHeader()->getName()
477 << "] Variable stride: " << *AddRec << "\n";
480 Stride = AddRec->getOperand(1);
484 /// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
485 /// and now we need to decide whether the user should use the preinc or post-inc
486 /// value. If this user should use the post-inc version of the IV, return true.
488 /// Choosing wrong here can break dominance properties (if we choose to use the
489 /// post-inc value when we cannot) or it can end up adding extra live-ranges to
490 /// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
491 /// should use the post-inc value).
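///
/// For example (informally): a compare sitting in a block after the loop that
/// the latch dominates sees the value produced by the final increment, so the
/// post-inc value is the natural and cheaper choice there, while a use in the
/// loop body must keep the pre-inc value.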
492 static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
493 Loop *L, DominatorTree *DT, Pass *P,
494 SmallVectorImpl<Instruction*> &DeadInsts){
495 // If the user is in the loop, use the preinc value.
496 if (L->contains(User->getParent())) return false;
498 BasicBlock *LatchBlock = L->getLoopLatch();
500 // Ok, the user is outside of the loop. If it is dominated by the latch
501 // block, use the post-inc value.
502 if (DT->dominates(LatchBlock, User->getParent()))
505 // There is one case we have to be careful of: PHI nodes. These little guys
506 // can live in blocks that do not dominate the latch block, but (since their
507 // uses occur in the predecessor block, not the block the PHI lives in) should
508 // still use the post-inc value. Check for this case now.
509 PHINode *PN = dyn_cast<PHINode>(User);
510 if (!PN) return false; // not a phi, not dominated by latch block.
512 // Look at all of the uses of IV by the PHI node. If any use corresponds to
513 // a block that is not dominated by the latch block, give up and use the
514 // preincremented value.
515 unsigned NumUses = 0;
516 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
517 if (PN->getIncomingValue(i) == IV) {
519 if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
523 // Okay, all uses of IV by PN are in predecessor blocks that really are
524 // dominated by the latch block. Split the critical edges and use the
525 // post-incremented value.
526 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
527 if (PN->getIncomingValue(i) == IV) {
528 SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
529 // Splitting the critical edge can reduce the number of entries in this
530 // PHI.
531 e = PN->getNumIncomingValues();
532 if (--NumUses == 0) break;
535 // PHI node might have become a constant value after SplitCriticalEdge.
536 DeadInsts.push_back(User);
541 /// isAddressUse - Returns true if the specified instruction is using the
542 /// specified value as an address.
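///
/// For instance (illustrative): in "store i32 %v, i32* %p", %p is an address
/// use but %v is not, even though both are operands of the store.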
543 static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
544 bool isAddress = isa<LoadInst>(Inst);
545 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
546 if (SI->getOperand(1) == OperandVal)
548 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
549 // Addressing modes can also be folded into prefetches and a variety
550 // of intrinsics.
551 switch (II->getIntrinsicID()) {
553 case Intrinsic::prefetch:
554 case Intrinsic::x86_sse2_loadu_dq:
555 case Intrinsic::x86_sse2_loadu_pd:
556 case Intrinsic::x86_sse_loadu_ps:
557 case Intrinsic::x86_sse_storeu_ps:
558 case Intrinsic::x86_sse2_storeu_pd:
559 case Intrinsic::x86_sse2_storeu_dq:
560 case Intrinsic::x86_sse2_storel_dq:
561 if (II->getOperand(1) == OperandVal)
569 /// AddUsersIfInteresting - Inspect the specified instruction. If it is a
570 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
571 /// return true. Otherwise, return false.
572 bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
573 SmallPtrSet<Instruction*,16> &Processed) {
574 if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
575 return false; // Void and FP expressions cannot be reduced.
576 if (!Processed.insert(I))
577 return true; // Instruction already handled.
579 // Get the symbolic expression for this instruction.
580 SCEVHandle ISE = GetExpressionSCEV(I);
581 if (isa<SCEVCouldNotCompute>(ISE)) return false;
583 // Get the start and stride for this expression.
584 SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
585 SCEVHandle Stride = Start;
586 if (!getSCEVStartAndStride(ISE, L, Start, Stride, SE, DT))
587 return false; // Non-reducible symbolic expression, bail out.
589 std::vector<Instruction *> IUsers;
590 // Collect all I uses now because IVUseShouldUsePostIncValue may
591 // invalidate use_iterator.
592 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
593 IUsers.push_back(cast<Instruction>(*UI));
595 for (unsigned iused_index = 0, iused_size = IUsers.size();
596 iused_index != iused_size; ++iused_index) {
598 Instruction *User = IUsers[iused_index];
600 // Do not infinitely recurse on PHI nodes.
601 if (isa<PHINode>(User) && Processed.count(User))
604 // Descend recursively, but not into PHI nodes outside the current loop.
605 // It's important to see the entire expression outside the loop to get
606 // choices that depend on addressing mode use right, although we won't
607 // consider references outside the loop in all cases.
608 // If User is already in Processed, we don't want to recurse into it again,
609 // but do want to record a second reference in the same instruction.
610 bool AddUserToIVUsers = false;
611 if (LI->getLoopFor(User->getParent()) != L) {
612 if (isa<PHINode>(User) || Processed.count(User) ||
613 !AddUsersIfInteresting(User, L, Processed)) {
614 DOUT << "FOUND USER in other loop: " << *User
615 << " OF SCEV: " << *ISE << "\n";
616 AddUserToIVUsers = true;
618 } else if (Processed.count(User) ||
619 !AddUsersIfInteresting(User, L, Processed)) {
620 DOUT << "FOUND USER: " << *User
621 << " OF SCEV: " << *ISE << "\n";
622 AddUserToIVUsers = true;
625 if (AddUserToIVUsers) {
626 IVUsersOfOneStride &StrideUses = IVUsesByStride[Stride];
627 if (StrideUses.Users.empty()) // First occurrence of this stride?
628 StrideOrder.push_back(Stride);
630 // Okay, we found a user that we cannot reduce. Analyze the instruction
631 // and decide what to do with it. If we are a use inside of the loop, use
632 // the value before incrementation, otherwise use it after incrementation.
633 if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
634 // The value used will be incremented by the stride more than we are
635 // expecting, so subtract this off.
636 SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
637 StrideUses.addUser(NewStart, User, I);
638 StrideUses.Users.back().isUseOfPostIncrementedValue = true;
639 DOUT << " USING POSTINC SCEV, START=" << *NewStart<< "\n";
641 StrideUses.addUser(Start, User, I);
649 /// BasedUser - For a particular base value, keep information about how we've
650 /// partitioned the expression so far.
652 /// SE - The current ScalarEvolution object.
655 /// Base - The Base value for the PHI node that needs to be inserted for
656 /// this use. As the use is processed, information gets moved from this
657 /// field to the Imm field (below). BasedUser values are sorted by this
658 /// field.
661 /// Inst - The instruction using the induction variable.
664 /// OperandValToReplace - The operand value of Inst to replace with the
666 Value *OperandValToReplace;
668 /// Imm - The immediate value that should be added to the base immediately
669 /// before Inst, because it will be folded into the imm field of the
670 /// instruction. This is also sometimes used for loop-variant values that
671 /// must be added inside the loop.
674 /// Phi - The induction variable that performs the striding that
675 /// should be used for this user.
678 /// IncV - The post-incremented value of Phi.
681 // isUseOfPostIncrementedValue - True if this should use the
682 // post-incremented version of this IV, not the preincremented version.
683 // This can only be set in special cases, such as the terminating setcc
684 // instruction for a loop and uses outside the loop that are dominated by
685 // the loop.
686 bool isUseOfPostIncrementedValue;
688 BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
689 : SE(se), Base(IVSU.Offset), Inst(IVSU.User),
690 OperandValToReplace(IVSU.OperandValToReplace),
691 Imm(SE->getIntegerSCEV(0, Base->getType())),
692 isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue) {}
694 // Once we rewrite the code to insert the new IVs we want, update the
695 // operands of Inst to use the new expression 'NewBase', with 'Imm' added
696 // to it.
697 void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
698 Instruction *InsertPt,
699 SCEVExpander &Rewriter, Loop *L, Pass *P,
700 SmallVectorImpl<Instruction*> &DeadInsts);
702 Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
703 SCEVExpander &Rewriter,
704 Instruction *IP, Loop *L);
709 void BasedUser::dump() const {
710 cerr << " Base=" << *Base;
711 cerr << " Imm=" << *Imm;
712 cerr << " Inst: " << *Inst;
715 Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
716 SCEVExpander &Rewriter,
717 Instruction *IP, Loop *L) {
718 // Figure out where we *really* want to insert this code. In particular, if
719 // the user is inside of a loop that is nested inside of L, we really don't
720 // want to insert this expression before the user, we'd rather pull it out as
721 // many loops as possible.
722 LoopInfo &LI = Rewriter.getLoopInfo();
723 Instruction *BaseInsertPt = IP;
725 // Figure out the most-nested loop that IP is in.
726 Loop *InsertLoop = LI.getLoopFor(IP->getParent());
728 // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
729 // the preheader of the outer-most loop where NewBase is not loop invariant.
730 if (L->contains(IP->getParent()))
731 while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
732 BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
733 InsertLoop = InsertLoop->getParentLoop();
736 Value *Base = Rewriter.expandCodeFor(NewBase, BaseInsertPt);
738 // If there is no immediate value, skip the next part.
739 if (Imm->isZero())
740 return Base;
742 // If we are inserting the base and imm values in the same block, make sure to
743 // adjust the IP position if insertion reused a result.
744 if (IP == BaseInsertPt)
745 IP = Rewriter.getInsertionPoint();
747 // Always emit the immediate (if non-zero) into the same block as the user.
748 SCEVHandle NewValSCEV = SE->getAddExpr(SE->getUnknown(Base), Imm);
749 return Rewriter.expandCodeFor(NewValSCEV, IP);
754 // Once we rewrite the code to insert the new IVs we want, update the
755 // operands of Inst to use the new expression 'NewBase', with 'Imm' added
756 // to it. NewBasePt is the last instruction which contributes to the
757 // value of NewBase in the case that it's a different instruction from
758 // the PHI that NewBase is computed from, or null otherwise.
760 void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
761 Instruction *NewBasePt,
762 SCEVExpander &Rewriter, Loop *L, Pass *P,
763 SmallVectorImpl<Instruction*> &DeadInsts){
764 if (!isa<PHINode>(Inst)) {
765 // By default, insert code at the user instruction.
766 BasicBlock::iterator InsertPt = Inst;
768 // However, if the Operand is itself an instruction, the (potentially
769 // complex) inserted code may be shared by many users. Because of this, we
770 // want to emit code for the computation of the operand right before its old
771 // computation. This is usually safe, because we obviously used to use the
772 // computation when it was computed in its current block. However, in some
773 // cases (e.g. use of a post-incremented induction variable) the NewBase
774 // value will be pinned to live somewhere after the original computation.
775 // In this case, we have to back off.
777 // If this is a use outside the loop (which means after, since it is based
778 // on a loop indvar) we use the post-incremented value, so that we don't
779 // artificially make the preinc value live out the bottom of the loop.
780 if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
781 if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
782 InsertPt = NewBasePt;
784 } else if (Instruction *OpInst
785 = dyn_cast<Instruction>(OperandValToReplace)) {
787 while (isa<PHINode>(InsertPt)) ++InsertPt;
790 Value *NewVal = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
791 // Adjust the type back to match the Inst. Note that we can't use InsertPt
792 // here because the SCEVExpander may have inserted the instructions after
793 // that point, in its efforts to avoid inserting redundant expressions.
794 if (isa<PointerType>(OperandValToReplace->getType())) {
795 NewVal = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
796 NewVal,
797 OperandValToReplace->getType());
799 // Replace the use of the operand Value with the new Phi we just created.
800 Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
802 DOUT << " Replacing with ";
803 DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
804 DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
808 // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
809 // expression into each operand block that uses it. Note that PHI nodes can
810 // have multiple entries for the same predecessor. We use a map to make sure
811 // that a PHI node only has a single Value* for each predecessor (which also
812 // prevents us from inserting duplicate code in some blocks).
813 DenseMap<BasicBlock*, Value*> InsertedCode;
814 PHINode *PN = cast<PHINode>(Inst);
815 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
816 if (PN->getIncomingValue(i) == OperandValToReplace) {
817 // If the original expression is outside the loop, put the replacement
818 // code in the same place as the original expression,
819 // which need not be an immediate predecessor of this PHI. This way we
820 // need only one copy of it even if it is referenced multiple times in
821 // the PHI. We don't do this when the original expression is inside the
822 // loop because multiple copies sometimes do useful sinking of code in
824 Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
825 if (L->contains(OldLoc->getParent())) {
826 // If this is a critical edge, split the edge so that we do not insert
827 // the code on all predecessor/successor paths. We do this unless this
828 // is the canonical backedge for this loop, as this can make some
829 // inserted code be in an illegal position.
830 BasicBlock *PHIPred = PN->getIncomingBlock(i);
831 if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
832 (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {
834 // First step, split the critical edge.
835 SplitCriticalEdge(PHIPred, PN->getParent(), P, false);
837 // Next step: move the basic block. In particular, if the PHI node
838 // is outside of the loop, and PredTI is in the loop, we want to
839 // move the block to be immediately before the PHI block, not
840 // immediately after PredTI.
841 if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
842 BasicBlock *NewBB = PN->getIncomingBlock(i);
843 NewBB->moveBefore(PN->getParent());
846 // Splitting the edge can reduce the number of PHI entries we have.
847 e = PN->getNumIncomingValues();
850 Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
852 // Insert the code into the end of the predecessor block.
853 Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
854 PN->getIncomingBlock(i)->getTerminator() :
855 OldLoc->getParent()->getTerminator();
856 Code = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
858 // Adjust the type back to match the PHI. Note that we can't use
859 // InsertPt here because the SCEVExpander may have inserted its
860 // instructions after that point, in its efforts to avoid inserting
861 // redundant expressions.
862 if (isa<PointerType>(PN->getType())) {
863 Code = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr, Code,
864 PN->getType());
868 DOUT << " Changing PHI use to ";
869 DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
870 DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
873 // Replace the use of the operand Value with the new Phi we just created.
874 PN->setIncomingValue(i, Code);
879 // PHI node might have become a constant value after SplitCriticalEdge.
880 DeadInsts.push_back(Inst);
884 /// fitsInAddressMode - Return true if V can be subsumed within an addressing
885 /// mode, and does not need to be put in a register first.
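///
/// For example (target dependent, so only a sketch): an x86-like target can
/// usually fold a constant such as 64 into the displacement of [reg + 64], so
/// it "fits"; with no TargetLowering available, the conservative fallback
/// below only accepts constants that fit a signed 16-bit immediate field.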
886 static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
887 const TargetLowering *TLI, bool HasBaseReg) {
888 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
889 int64_t VC = SC->getValue()->getSExtValue();
891 TargetLowering::AddrMode AM;
893 AM.HasBaseReg = HasBaseReg;
894 return TLI->isLegalAddressingMode(AM, UseTy);
896 // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
897 return (VC > -(1 << 16) && VC < (1 << 16)-1);
901 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
902 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
903 if (TLI && CE->getOpcode() == Instruction::PtrToInt) {
904 Constant *Op0 = CE->getOperand(0);
905 if (GlobalValue *GV = dyn_cast<GlobalValue>(Op0)) {
906 TargetLowering::AddrMode AM;
908 AM.HasBaseReg = HasBaseReg;
909 return TLI->isLegalAddressingMode(AM, UseTy);
915 /// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
916 /// loop varying to the Imm operand.
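///
/// Roughly: if Val = %inv + %var, where %inv is invariant in L and %var
/// changes inside L, this leaves Val = %inv and folds %var into Imm, so the
/// variant part is materialized next to each user rather than in the IV base.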
917 static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
918 Loop *L, ScalarEvolution *SE) {
919 if (Val->isLoopInvariant(L)) return; // Nothing to do.
921 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
922 std::vector<SCEVHandle> NewOps;
923 NewOps.reserve(SAE->getNumOperands());
925 for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
926 if (!SAE->getOperand(i)->isLoopInvariant(L)) {
927 // If this is a loop-variant expression, it must stay in the immediate
928 // field of the expression.
929 Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
931 NewOps.push_back(SAE->getOperand(i));
935 Val = SE->getIntegerSCEV(0, Val->getType());
937 Val = SE->getAddExpr(NewOps);
938 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
939 // Try to pull immediates out of the start value of nested addrec's.
940 SCEVHandle Start = SARE->getStart();
941 MoveLoopVariantsToImmediateField(Start, Imm, L, SE);
943 std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
945 Val = SE->getAddRecExpr(Ops, SARE->getLoop());
947 // Otherwise, all of Val is variant, move the whole thing over.
948 Imm = SE->getAddExpr(Imm, Val);
949 Val = SE->getIntegerSCEV(0, Val->getType());
954 /// MoveImmediateValues - Look at Val, and pull out any additions of constants
955 /// that can fit into the immediate field of instructions in the target.
956 /// Accumulate these immediate values into the Imm value.
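///
/// A small illustration: for an address use whose target can fold a constant
/// displacement, Val = %x + 12 becomes Val = %x with 12 accumulated into Imm,
/// letting the constant live in the addressing mode instead of being added
/// into the shared IV base.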
957 static void MoveImmediateValues(const TargetLowering *TLI,
959 SCEVHandle &Val, SCEVHandle &Imm,
960 bool isAddress, Loop *L,
961 ScalarEvolution *SE) {
962 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
963 std::vector<SCEVHandle> NewOps;
964 NewOps.reserve(SAE->getNumOperands());
966 for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
967 SCEVHandle NewOp = SAE->getOperand(i);
968 MoveImmediateValues(TLI, UseTy, NewOp, Imm, isAddress, L, SE);
970 if (!NewOp->isLoopInvariant(L)) {
971 // If this is a loop-variant expression, it must stay in the immediate
972 // field of the expression.
973 Imm = SE->getAddExpr(Imm, NewOp);
975 NewOps.push_back(NewOp);
980 Val = SE->getIntegerSCEV(0, Val->getType());
982 Val = SE->getAddExpr(NewOps);
984 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
985 // Try to pull immediates out of the start value of nested addrec's.
986 SCEVHandle Start = SARE->getStart();
987 MoveImmediateValues(TLI, UseTy, Start, Imm, isAddress, L, SE);
989 if (Start != SARE->getStart()) {
990 std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
992 Val = SE->getAddRecExpr(Ops, SARE->getLoop());
995 } else if (SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
996 // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
997 if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
998 SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
1000 SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
1001 SCEVHandle NewOp = SME->getOperand(1);
1002 MoveImmediateValues(TLI, UseTy, NewOp, SubImm, isAddress, L, SE);
1004 // If we extracted something out of the subexpressions, see if we can
1005 // simplify this further.
1006 if (NewOp != SME->getOperand(1)) {
1007 // Scale SubImm up by "8". If the result is a target constant, we are
1008 // in good shape.
1009 SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
1010 if (fitsInAddressMode(SubImm, UseTy, TLI, false)) {
1011 // Accumulate the immediate.
1012 Imm = SE->getAddExpr(Imm, SubImm);
1014 // Update what is left of 'Val'.
1015 Val = SE->getMulExpr(SME->getOperand(0), NewOp);
1022 // Loop-variant expressions must stay in the immediate field of the
1023 // expression.
1024 if ((isAddress && fitsInAddressMode(Val, UseTy, TLI, false)) ||
1025 !Val->isLoopInvariant(L)) {
1026 Imm = SE->getAddExpr(Imm, Val);
1027 Val = SE->getIntegerSCEV(0, Val->getType());
1031 // Otherwise, no immediates to move.
1034 static void MoveImmediateValues(const TargetLowering *TLI,
1036 SCEVHandle &Val, SCEVHandle &Imm,
1037 bool isAddress, Loop *L,
1038 ScalarEvolution *SE) {
1039 const Type *UseTy = User->getType();
1040 if (StoreInst *SI = dyn_cast<StoreInst>(User))
1041 UseTy = SI->getOperand(0)->getType();
1042 MoveImmediateValues(TLI, UseTy, Val, Imm, isAddress, L, SE);
1045 /// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
1046 /// added together. This is used to reassociate common addition subexprs
1047 /// together for maximal sharing when rewriting bases.
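///
/// For example (informally): %a + %b + {0,+,4}<L> separates into the pieces
/// { %a, %b, {0,+,4}<L> }, and an addrec with a non-zero start such as
/// {%a,+,4}<L> separates into %a's pieces plus {0,+,4}<L>.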
1048 static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
1050 ScalarEvolution *SE) {
1051 if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
1052 for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
1053 SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
1054 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
1055 SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
1056 if (SARE->getOperand(0) == Zero) {
1057 SubExprs.push_back(Expr);
1059 // Compute the addrec with zero as its base.
1060 std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
1061 Ops[0] = Zero; // Start with zero base.
1062 SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));
1065 SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
1067 } else if (!Expr->isZero()) {
1069 SubExprs.push_back(Expr);
1073 // This is logically local to the following function, but C++ says we have
1074 // to make it file scope.
1075 struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
1077 /// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
1078 /// the Uses, removing any common subexpressions, except that if all such
1079 /// subexpressions can be folded into an addressing mode for all uses inside
1080 /// the loop (this case is referred to as "free" in comments herein) we do
1081 /// not remove anything. This looks for things like (a+b+c) and
1082 /// (a+c+d) and computes the common (a+c) subexpression. The common expression
1083 /// is *removed* from the Bases and returned.
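///
/// To make that concrete (illustrative only): with two in-loop bases (a+b+c)
/// and (a+c+d), the returned common expression is (a+c), the remaining bases
/// become b and d, and the (a+c) part is emitted once in the preheader rather
/// than once per use.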
1085 RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
1086 ScalarEvolution *SE, Loop *L,
1087 const TargetLowering *TLI) {
1088 unsigned NumUses = Uses.size();
1090 // Only one use? This is a very common case, so we handle it specially and
1091 // cheaply.
1092 SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
1093 SCEVHandle Result = Zero;
1094 SCEVHandle FreeResult = Zero;
1096 // If the use is inside the loop, use its base, regardless of what it is:
1097 // it is clearly shared across all the IV's. If the use is outside the loop
1098 // (which means after it) we don't want to factor anything *into* the loop,
1099 // so just use 0 as the base.
1100 if (L->contains(Uses[0].Inst->getParent()))
1101 std::swap(Result, Uses[0].Base);
1105 // To find common subexpressions, count how many of Uses use each expression.
1106 // If any subexpressions are used Uses.size() times, they are common.
1107 // Also track whether all uses of each expression can be moved into an
1108 // addressing mode "for free"; such expressions are left within the loop.
1109 // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
1110 std::map<SCEVHandle, SubExprUseData> SubExpressionUseData;
1112 // UniqueSubExprs - Keep track of all of the subexpressions we see in the
1113 // order we see them.
1114 std::vector<SCEVHandle> UniqueSubExprs;
1116 std::vector<SCEVHandle> SubExprs;
1117 unsigned NumUsesInsideLoop = 0;
1118 for (unsigned i = 0; i != NumUses; ++i) {
1119 // If the user is outside the loop, just ignore it for base computation.
1120 // Since the user is outside the loop, it must be *after* the loop (if it
1121 // were before, it could not be based on the loop IV). We don't want users
1122 // after the loop to affect base computation of values *inside* the loop,
1123 // because we can always add their offsets to the result IV after the loop
1124 // is done, ensuring we get good code inside the loop.
1125 if (!L->contains(Uses[i].Inst->getParent()))
1127 NumUsesInsideLoop++;
1129 // If the base is zero (which is common), return zero now, there are no
1130 // CSEs we can find.
1131 if (Uses[i].Base == Zero) return Zero;
1133 // If this use is as an address we may be able to put CSEs in the addressing
1134 // mode rather than hoisting them.
1135 bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
1136 // We may need the UseTy below, but only when isAddrUse, so compute it
1137 // only in that case.
1138 const Type *UseTy = 0;
1140 UseTy = Uses[i].Inst->getType();
1141 if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
1142 UseTy = SI->getOperand(0)->getType();
1145 // Split the expression into subexprs.
1146 SeparateSubExprs(SubExprs, Uses[i].Base, SE);
1147 // Add one to SubExpressionUseData.Count for each subexpr present, and
1148 // if the subexpr is not a valid immediate within an addressing mode use,
1149 // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
1150 // hoist these out of the loop (if they are common to all uses).
1151 for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
1152 if (++SubExpressionUseData[SubExprs[j]].Count == 1)
1153 UniqueSubExprs.push_back(SubExprs[j]);
1154 if (!isAddrUse || !fitsInAddressMode(SubExprs[j], UseTy, TLI, false))
1155 SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
1160 // Now that we know how many times each is used, build Result. Iterate over
1161 // UniqueSubexprs so that we have a stable ordering.
1162 for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
1163 std::map<SCEVHandle, SubExprUseData>::iterator I =
1164 SubExpressionUseData.find(UniqueSubExprs[i]);
1165 assert(I != SubExpressionUseData.end() && "Entry not found?");
1166 if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
1167 if (I->second.notAllUsesAreFree)
1168 Result = SE->getAddExpr(Result, I->first);
1170 FreeResult = SE->getAddExpr(FreeResult, I->first);
1172 // Remove non-cse's from SubExpressionUseData.
1173 SubExpressionUseData.erase(I);
1176 if (FreeResult != Zero) {
1177 // We have some subexpressions that can be subsumed into addressing
1178 // modes in every use inside the loop. However, it's possible that
1179 // there are so many of them that the combined FreeResult cannot
1180 // be subsumed, or that the target cannot handle both a FreeResult
1181 // and a Result in the same instruction (for example because it would
1182 // require too many registers). Check this.
1183 for (unsigned i=0; i<NumUses; ++i) {
1184 if (!L->contains(Uses[i].Inst->getParent()))
1186 // We know this is an addressing mode use; if there are any uses that
1187 // are not, FreeResult would be Zero.
1188 const Type *UseTy = Uses[i].Inst->getType();
1189 if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
1190 UseTy = SI->getOperand(0)->getType();
1191 if (!fitsInAddressMode(FreeResult, UseTy, TLI, Result!=Zero)) {
1192 // FIXME: could split up FreeResult into pieces here, some hoisted
1193 // and some not. There is no obvious advantage to this.
1194 Result = SE->getAddExpr(Result, FreeResult);
1201 // If we found no CSE's, return now.
1202 if (Result == Zero) return Result;
1204 // If we still have a FreeResult, remove its subexpressions from
1205 // SubExpressionUseData. This means they will remain in the use Bases.
1206 if (FreeResult != Zero) {
1207 SeparateSubExprs(SubExprs, FreeResult, SE);
1208 for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
1209 std::map<SCEVHandle, SubExprUseData>::iterator I =
1210 SubExpressionUseData.find(SubExprs[j]);
1211 SubExpressionUseData.erase(I);
1216 // Otherwise, remove all of the CSE's we found from each of the base values.
1217 for (unsigned i = 0; i != NumUses; ++i) {
1218 // Uses outside the loop don't necessarily include the common base, but
1219 // the final IV value coming into those uses does. Instead of trying to
1220 // remove the pieces of the common base, which might not be there,
1221 // subtract off the base to compensate for this.
1222 if (!L->contains(Uses[i].Inst->getParent())) {
1223 Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
1227 // Split the expression into subexprs.
1228 SeparateSubExprs(SubExprs, Uses[i].Base, SE);
1230 // Remove any common subexpressions.
1231 for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
1232 if (SubExpressionUseData.count(SubExprs[j])) {
1233 SubExprs.erase(SubExprs.begin()+j);
1237 // Finally, add the non-shared expressions together.
1238 if (SubExprs.empty())
1239 Uses[i].Base = Zero;
1241 Uses[i].Base = SE->getAddExpr(SubExprs);
1248 /// ValidStride - Check whether the given Scale is valid for all loads and
1249 /// stores in UsersToProcess.
1251 bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
1253 const std::vector<BasedUser>& UsersToProcess) {
1257 for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
1258 // If this is a load or other access, pass the type of the access in.
1259 const Type *AccessTy = Type::VoidTy;
1260 if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].Inst))
1261 AccessTy = SI->getOperand(0)->getType();
1262 else if (LoadInst *LI = dyn_cast<LoadInst>(UsersToProcess[i].Inst))
1263 AccessTy = LI->getType();
1264 else if (isa<PHINode>(UsersToProcess[i].Inst))
1267 TargetLowering::AddrMode AM;
1268 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
1269 AM.BaseOffs = SC->getValue()->getSExtValue();
1270 AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
1273 // If load[imm+r*scale] is illegal, bail out.
1274 if (!TLI->isLegalAddressingMode(AM, AccessTy))
1280 /// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
1281 /// free.
1282 bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
1286 if (Ty1->canLosslesslyBitCastTo(Ty2))
1288 if (TLI && TLI->isTruncateFree(Ty1, Ty2))
1290 if (isa<PointerType>(Ty2) && Ty1->canLosslesslyBitCastTo(UIntPtrTy))
1292 if (isa<PointerType>(Ty1) && Ty2->canLosslesslyBitCastTo(UIntPtrTy))
1297 /// CheckForIVReuse - Returns the multiple if the stride is the multiple
1298 /// of a previous stride and it is a legal value for the target addressing
1299 /// mode scale component and optional base reg. This allows the users of
1300 /// this stride to be rewritten as prev iv * factor. It returns 0 if no
1301 /// reuse is possible. Factors can be negative on some targets, e.g. ARM.
1303 /// If all uses are outside the loop, we don't require that all multiplies
1304 /// be folded into the addressing mode, nor even that the factor be constant;
1305 /// a multiply (executed once) outside the loop is better than another IV
1306 /// within. Well, usually.
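///
/// For example (a sketch): if an IV with constant stride 4 already exists and
/// the current stride is 8, the factor 2 is returned, and, provided the
/// target can fold a scale of 2 into its addressing modes, the stride-8 users
/// are rewritten in terms of the existing IV as iv*2 instead of getting a new
/// IV of their own.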
1307 SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
1308 bool AllUsesAreAddresses,
1309 bool AllUsesAreOutsideLoop,
1310 const SCEVHandle &Stride,
1311 IVExpr &IV, const Type *Ty,
1312 const std::vector<BasedUser>& UsersToProcess) {
1313 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
1314 int64_t SInt = SC->getValue()->getSExtValue();
1315 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1317 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1318 IVsByStride.find(StrideOrder[NewStride]);
1319 if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
1321 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1322 if (SI->first != Stride &&
1323 (unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
1325 int64_t Scale = SInt / SSInt;
1326 // Check that this stride is valid for all the types used for loads and
1327 // stores; if it can be used for some and not others, we might as well use
1328 // the original stride everywhere, since we have to create the IV for it
1329 // anyway. If the scale is 1, then we don't need to worry about folding
1330 // multiplications.
1331 if (Scale == 1 ||
1332 (AllUsesAreAddresses &&
1333 ValidStride(HasBaseReg, Scale, UsersToProcess)))
1334 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1335 IE = SI->second.IVs.end(); II != IE; ++II)
1336 // FIXME: Only handle base == 0 for now.
1337 // Only reuse previous IV if it would not require a type conversion.
1338 if (II->Base->isZero() &&
1339 !RequiresTypeConversion(II->Base->getType(), Ty)) {
1341 return SE->getIntegerSCEV(Scale, Stride->getType());
1344 } else if (AllUsesAreOutsideLoop) {
1345 // Accept nonconstant strides here; it is really really right to substitute
1346 // an existing IV if we can.
1347 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1349 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1350 IVsByStride.find(StrideOrder[NewStride]);
1351 if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
1353 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1354 if (SI->first != Stride && SSInt != 1)
1356 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1357 IE = SI->second.IVs.end(); II != IE; ++II)
1358 // Accept nonzero base here.
1359 // Only reuse previous IV if it would not require a type conversion.
1360 if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1365 // Special case, old IV is -1*x and this one is x. Can treat this one as
1367 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1369 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1370 IVsByStride.find(StrideOrder[NewStride]);
1371 if (SI == IVsByStride.end())
1373 if (SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
1374 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
1375 if (Stride == ME->getOperand(1) &&
1376 SC->getValue()->getSExtValue() == -1LL)
1377 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1378 IE = SI->second.IVs.end(); II != IE; ++II)
1379 // Accept nonzero base here.
1380 // Only reuse previous IV if it would not require type conversion.
1381 if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1383 return SE->getIntegerSCEV(-1LL, Stride->getType());
1387 return SE->getIntegerSCEV(0, Stride->getType());
1390 /// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
1391 /// returns true if Val's isUseOfPostIncrementedValue is true.
1392 static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
1393 return Val.isUseOfPostIncrementedValue;
1396 /// isNonConstantNegative - Return true if the specified scev is negated, but
1397 /// not a constant.
1398 static bool isNonConstantNegative(const SCEVHandle &Expr) {
1399 SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
1400 if (!Mul) return false;
1402 // If there is a constant factor, it will be first.
1403 SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
1404 if (!SC) return false;
1406 // Return true if the value is negative, this matches things like (-42 * V).
1407 return SC->getValue()->getValue().isNegative();
1410 // CollectIVUsers - Transform our list of users and offsets to a bit more
1411 // complex table. In this new vector, each 'BasedUser' contains 'Base', the base
1412 // of the strided accesses, as well as the old information from Uses. We
1413 // progressively move information from the Base field to the Imm field, until
1414 // we eventually have the full access expression to rewrite the use.
1415 SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
1416 IVUsersOfOneStride &Uses,
1418 bool &AllUsesAreAddresses,
1419 bool &AllUsesAreOutsideLoop,
1420 std::vector<BasedUser> &UsersToProcess) {
1421 UsersToProcess.reserve(Uses.Users.size());
1422 for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i) {
1423 UsersToProcess.push_back(BasedUser(Uses.Users[i], SE));
1425 // Move any loop variant operands from the offset field to the immediate
1426 // field of the use, so that we don't try to use something before it is
1427 // computed.
1428 MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
1429 UsersToProcess.back().Imm, L, SE);
1430 assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
1431 "Base value is not loop invariant!");
1434 // We now have a whole bunch of uses of like-strided induction variables, but
1435 // they might all have different bases. We want to emit one PHI node for this
1436 // stride which we fold as many common expressions (between the IVs) into as
1437 // possible. Start by identifying the common expressions in the base values
1438 // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
1439 // "A+B"), emit it to the preheader, then remove the expression from the
1440 // UsersToProcess base values.
1441 SCEVHandle CommonExprs =
1442 RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);
1444 // Next, figure out what we can represent in the immediate fields of
1445 // instructions. If we can represent anything there, move it to the imm
1446 // fields of the BasedUsers. We do this so that it increases the commonality
1447 // of the remaining uses.
1448 unsigned NumPHI = 0;
1449 bool HasAddress = false;
1450 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1451 // If the user is not in the current loop, this means it is using the exit
1452 // value of the IV. Do not put anything in the base, make sure it's all in
1453 // the immediate field to allow as much factoring as possible.
1454 if (!L->contains(UsersToProcess[i].Inst->getParent())) {
1455 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
1456 UsersToProcess[i].Base);
1457 UsersToProcess[i].Base =
1458 SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
1460 // Not all uses are outside the loop.
1461 AllUsesAreOutsideLoop = false;
1463 // Addressing modes can be folded into loads and stores. Be careful that
1464 // the store is through the expression, not of the expression though.
1466 bool isAddress = isAddressUse(UsersToProcess[i].Inst,
1467 UsersToProcess[i].OperandValToReplace);
1468 if (isa<PHINode>(UsersToProcess[i].Inst)) {
1476 // If this use isn't an address, then not all uses are addresses.
1477 if (!isAddress && !isPHI)
1478 AllUsesAreAddresses = false;
1480 MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
1481 UsersToProcess[i].Imm, isAddress, L, SE);
1485 // If one of the uses is a PHI node and all other uses are addresses, still
1486 // allow iv reuse. Essentially we are trading one constant multiplication
1487 // for one fewer iv.
1489 AllUsesAreAddresses = false;
1491 // There are no in-loop address uses.
1492 if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
1493 AllUsesAreAddresses = false;
1498 /// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
1499 /// is valid and profitable for the given set of users of a stride. In
1500 /// full strength-reduction mode, all addresses at the current stride are
1501 /// strength-reduced all the way down to pointer arithmetic.
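///
/// Illustrative sketch (not from the original source): with two users whose
/// bases are %p and %q and a common stride S, full strength reduction emits
/// one IV per base rather than one shared IV plus per-use address math:
///
///   header:
///     %p.iv = phi [ %p, %preheader ], [ %p.iv.next, %latch ]
///     %q.iv = phi [ %q, %preheader ], [ %q.iv.next, %latch ]
///     ...
///   latch:
///     %p.iv.next = add %p.iv, S
///     %q.iv.next = add %q.iv, S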
1503 bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
1504 const std::vector<BasedUser> &UsersToProcess,
Loop *L,
1506 bool AllUsesAreAddresses,
1507 SCEVHandle Stride) {
1508 if (!EnableFullLSRMode)
    return false;
1511 // The heuristics below aim to avoid increasing register pressure, but
1512 // fully strength-reducing all the addresses increases the number of
1513 // add instructions, so don't do this when optimizing for size.
1514 // TODO: If the loop is large, the savings due to simpler addresses
1515 // may outweigh the costs of the extra increment instructions.
1516 if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;
1519 // TODO: For now, don't do full strength reduction if there could
1520 // potentially be greater-stride multiples of the current stride
1521 // which could reuse the current stride IV.
1522 if (StrideOrder.back() != Stride)
    return false;
1525 // Iterate through the uses to find conditions that automatically rule out
  // full strength reduction.
1527 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1528 SCEV *Base = UsersToProcess[i].Base;
1529 SCEV *Imm = UsersToProcess[i].Imm;
1530 // If any users have a loop-variant component, they can't be fully
1531 // strength-reduced.
1532 if (Imm && !Imm->isLoopInvariant(L))
      return false;
1534 // If there are two users with the same base and the difference between
1535 // the two Imm values can't be folded into the address, full
1536 // strength reduction would increase register pressure.
    do {
1538 SCEV *CurImm = UsersToProcess[i].Imm;
1539 if ((CurImm || Imm) && CurImm != Imm) {
1540 if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
1541 if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
1542 const Instruction *Inst = UsersToProcess[i].Inst;
1543 const Type *UseTy = Inst->getType();
1544 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
1545 UseTy = SI->getOperand(0)->getType();
1546 SCEVHandle Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1547 if (!Diff->isZero() &&
1548 (!AllUsesAreAddresses ||
1549 !fitsInAddressMode(Diff, UseTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
1552 } while (++i != e && Base == UsersToProcess[i].Base);
1555 // If there's exactly one user in this stride, fully strength-reducing it
1556 // won't increase register pressure. If it's starting from a non-zero base,
1557 // it'll be simpler this way.
1558 if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;
1561 // Otherwise, if there are any users in this stride that don't require
1562 // a register for their base, full strength-reduction will increase
1563 // register pressure.
1564 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1565 if (UsersToProcess[i].Base->isZero())
      return false;

1568 // Otherwise, go for it.
  return true;
}
1572 /// InsertAffinePhi - Create and insert a PHI node for an induction variable
1573 /// with the specified start and step values in the specified loop.
1575 /// If NegateStride is true, the stride should be negated by using a
1576 /// subtract instead of an add.
1578 /// Return the created phi node, and return the step instruction by
1579 /// reference in IncV.
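///
/// For example (illustrative only, using the names emitted below), a call
/// with Start = %base and Step = 4 produces roughly:
///
///   header:
///     %lsr.iv = phi [ <expansion of %base>, %preheader ],
///                   [ %lsr.iv.next, %backedge ]
///     ...
///   backedge block:
///     %lsr.iv.next = add %lsr.iv, 4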
1581 static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
Loop *L,
1583 SCEVExpander &Rewriter,
                                Instruction *&IncV) {
1585 assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
1586 assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
1588 BasicBlock *Header = L->getHeader();
1589 BasicBlock *Preheader = L->getLoopPreheader();
1591 PHINode *PN = PHINode::Create(Start->getType(), "lsr.iv", Header->begin());
1592 PN->addIncoming(Rewriter.expandCodeFor(Start, Preheader->getTerminator()),
                  Preheader);
1595 pred_iterator HPI = pred_begin(Header);
1596 assert(HPI != pred_end(Header) && "Loop with zero preds???");
1597 if (!L->contains(*HPI)) ++HPI;
1598 assert(HPI != pred_end(Header) && L->contains(*HPI) &&
1599 "No backedge in loop?");
1601 // If the stride is negative, insert a sub instead of an add for the
  // increment.
1603 bool isNegative = isNonConstantNegative(Step);
1604 SCEVHandle IncAmount = Step;
  if (isNegative)
1606 IncAmount = Rewriter.SE.getNegativeSCEV(Step);
1608 // Insert an add instruction right before the terminator corresponding
1609 // to the back-edge.
1610 Value *StepV = Rewriter.expandCodeFor(IncAmount, Preheader->getTerminator());
  if (isNegative)
1612 IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
1613 (*HPI)->getTerminator());
  else
1615 IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
1616 (*HPI)->getTerminator());
1618 if (!isa<ConstantInt>(StepV)) ++NumVariable;
1620 pred_iterator PI = pred_begin(Header);
1621 if (*PI == L->getLoopPreheader())
    ++PI;
1623 PN->addIncoming(IncV, *PI);
  return PN;
}
1629 static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
1630 // We want to emit code for users inside the loop first. To do this, we
1631 // rearrange BasedUser so that the entries at the end have
1632 // isUseOfPostIncrementedValue = false, because we pop off the end of the
1633 // vector (so we handle them first).
1634 std::partition(UsersToProcess.begin(), UsersToProcess.end(),
1635 PartitionByIsUseOfPostIncrementedValue);
1637 // Sort this by base, so that things with the same base are handled
1638 // together. By partitioning first and stable-sorting later, we are
1639 // guaranteed that within each base we will pop off users from within the
1640 // loop before users outside of the loop with a particular base.
1642 // We would like to use stable_sort here, but we can't. The problem is that
1643 // SCEVHandles don't have a deterministic ordering w.r.t. each other, so
1644 // we don't have anything to do a '<' comparison on. Because we think the
1645 // number of uses is small, do a horrible bubble sort which just relies on
  // comparing bases for equality.
1647 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1648 // Get a base value.
1649 SCEVHandle Base = UsersToProcess[i].Base;
1651 // Compact everything with this base to be consecutive with this one.
1652 for (unsigned j = i+1; j != e; ++j) {
1653 if (UsersToProcess[j].Base == Base) {
1654 std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}
1661 /// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
1662 /// UsersToProcess, meaning lowering addresses all the way down to direct
1663 /// pointer arithmetic.
void
1666 LoopStrengthReduce::PrepareToStrengthReduceFully(
1667 std::vector<BasedUser> &UsersToProcess,
                                      SCEVHandle Stride,
1669 SCEVHandle CommonExprs,
                                      Loop *L,
1671 SCEVExpander &PreheaderRewriter) {
1672 DOUT << " Fully reducing all users\n";
1674 // Rewrite the UsersToProcess records, creating a separate PHI for each
1675 // unique Base value.
1676 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1677 // TODO: The uses are grouped by base, but not sorted. We arbitrarily
1678 // pick the first Imm value here to start with, and adjust it for the
    // other uses.
1680 SCEVHandle Imm = UsersToProcess[i].Imm;
1681 SCEVHandle Base = UsersToProcess[i].Base;
1682 SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
    Instruction *IncV;
1684 PHINode *Phi = InsertAffinePhi(Start, Stride, L,
                                   PreheaderRewriter, IncV);
1687 // Loop over all the users with the same base.
    do {
1689 UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
1690 UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1691 UsersToProcess[i].Phi = Phi;
1692 UsersToProcess[i].IncV = IncV;
1693 assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
1694 "ShouldUseFullStrengthReductionMode should reject this!");
1695 } while (++i != e && Base == UsersToProcess[i].Base);
  }
}
1699 /// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
1700 /// given users to share.
void
1703 LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
1704 std::vector<BasedUser> &UsersToProcess,
                                                  SCEVHandle Stride,
1706 SCEVHandle CommonExprs,
                                                  Value *CommonBaseV,
                                                  Loop *L,
1709 SCEVExpander &PreheaderRewriter) {
1710 DOUT << " Inserting new PHI:\n";
1713 PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
1718 // Remember this in case a later stride is multiple of this.
1719 IVsByStride[Stride].addIV(Stride, CommonExprs, Phi, IncV);
1721 // All the users will share this new IV.
1722 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1723 UsersToProcess[i].Phi = Phi;
1724 UsersToProcess[i].IncV = IncV;
1728 DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
1730 DEBUG(WriteAsOperand(*DOUT, IncV, /*PrintType=*/false));
1734 /// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
1735 /// reuse an induction variable with a stride that is a factor of the current
1736 /// induction variable.
void
1739 LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
1740 std::vector<BasedUser> &UsersToProcess,
                                         Value *CommonBaseV,
1742 const IVExpr &ReuseIV,
1743 Instruction *PreInsertPt) {
1744 DOUT << " Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
1745 << " and BASE " << *ReuseIV.Base << "\n";
1747 // All the users will share the reused IV.
1748 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1749 UsersToProcess[i].Phi = ReuseIV.PHI;
1750 UsersToProcess[i].IncV = ReuseIV.IncV;
  }

1753 Constant *C = dyn_cast<Constant>(CommonBaseV);
  if (C &&
1755 (!C->isNullValue() &&
1756 !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
                         TLI, false)))
1758 // We want the common base emitted into the preheader! This is just
1759 // using cast as a copy so BitCast (no-op cast) is appropriate
1760 CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1761 "commonbase", PreInsertPt);
1764 static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
1765 const Type *ReplacedTy,
1766 std::vector<BasedUser> &UsersToProcess,
1767 const TargetLowering *TLI) {
1768 SmallVector<Instruction*, 16> AddrModeInsts;
1769 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1770 if (UsersToProcess[i].isUseOfPostIncrementedValue)
      continue;
1772 ExtAddrMode AddrMode =
1773 AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
1774 ReplacedTy, UsersToProcess[i].Inst,
1775 AddrModeInsts, *TLI);
1776 if (GV && GV != AddrMode.BaseGV)
      return false;
1778 if (Offset && !AddrMode.BaseOffs)
1779 // FIXME: How do we accurately check that the immediate offset is folded?
      return false;
1781 AddrModeInsts.clear();
  }
  return true;
}
1786 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
1787 /// stride of IV. All of the users may have different starting values, and this
1788 /// may not be the only stride (we know it is if isOnlyStride is true).
1789 void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
1790 IVUsersOfOneStride &Uses,
Loop *L,
1792 bool isOnlyStride) {
1793 // If all the users are moved to another stride, then there is nothing to do.
1794 if (Uses.Users.empty())
    return;
1797 // Keep track if every use in UsersToProcess is an address. If they all are,
1798 // we may be able to rewrite the entire collection of them in terms of a
1799 // smaller-stride IV.
1800 bool AllUsesAreAddresses = true;
1802 // Keep track if every use of a single stride is outside the loop. If so,
1803 // we want to be more aggressive about reusing a smaller-stride IV; a
1804 // multiply outside the loop is better than another IV inside. Well, usually.
1805 bool AllUsesAreOutsideLoop = true;
1807 // Transform our list of users and offsets to a bit more complex table. In
1808 // this new vector, each 'BasedUser' contains 'Base' the base of the
1809 // strided access, as well as the old information from Uses. We progressively
1810 // move information from the Base field to the Imm field, until we eventually
1811 // have the full access expression to rewrite the use.
1812 std::vector<BasedUser> UsersToProcess;
1813 SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1814 AllUsesAreOutsideLoop,
1817 // Sort the UsersToProcess array so that users with common bases are
1818 // next to each other.
1819 SortUsersToProcess(UsersToProcess);
1821 // If we managed to find some expressions in common, we'll need to carry
1822 // their value in a register and add it in for each use. This will take up
1823 // a register operand, which potentially restricts what stride values are
// valid.
1825 bool HaveCommonExprs = !CommonExprs->isZero();
1827 const Type *ReplacedTy = CommonExprs->getType();
1829 // If all uses are addresses, consider sinking the immediate part of the
1830 // common expression back into uses if they can fit in the immediate fields.
1831 if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
1832 SCEVHandle NewCommon = CommonExprs;
1833 SCEVHandle Imm = SE->getIntegerSCEV(0, ReplacedTy);
1834 MoveImmediateValues(TLI, ReplacedTy, NewCommon, Imm, true, L, SE);
1835 if (!Imm->isZero()) {
      bool DoSink = true;
1838 // If the immediate part of the common expression is a GV, check if it's
1839 // possible to fold it into the target addressing mode.
1840 GlobalValue *GV = 0;
1841 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm)) {
1842 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
1843 if (CE->getOpcode() == Instruction::PtrToInt)
1844 GV = dyn_cast<GlobalValue>(CE->getOperand(0));
      }
      int64_t Offset = 0;
1847 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
1848 Offset = SC->getValue()->getSExtValue();
1850 DoSink = IsImmFoldedIntoAddrMode(GV, Offset, ReplacedTy,
1851 UsersToProcess, TLI);
1854 DOUT << " Sinking " << *Imm << " back down into uses\n";
1855 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1856 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
1857 CommonExprs = NewCommon;
1858 HaveCommonExprs = !CommonExprs->isZero();
1864 // Now that we know what we need to do, insert the PHI node itself.
1866 DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1868 << " Common base: " << *CommonExprs << "\n";
1870 SCEVExpander Rewriter(*SE, *LI);
1871 SCEVExpander PreheaderRewriter(*SE, *LI);
1873 BasicBlock *Preheader = L->getLoopPreheader();
1874 Instruction *PreInsertPt = Preheader->getTerminator();
1875 BasicBlock *LatchBlock = L->getLoopLatch();
1877 Value *CommonBaseV = ConstantInt::get(ReplacedTy, 0);
1879 SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1880 IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
1881 SE->getIntegerSCEV(0, Type::Int32Ty),
1884 /// Choose a strength-reduction strategy and prepare for it by creating
1885 /// the necessary PHIs and adjusting the bookkeeping.
1886 if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1887 AllUsesAreAddresses, Stride)) {
1888 PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
                                 PreheaderRewriter);
  } else {
1891 // Emit the initial base value into the loop preheader.
1892 CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, PreInsertPt);
1894 // If all uses are addresses, check if it is possible to reuse an IV with a
1895 // stride that is a factor of this stride. And that the multiple is a number
1896 // that can be encoded in the scale field of the target addressing mode. And
1897 // that we will have a valid instruction after this substitution, including
1898 // the immediate field, if any.
1899 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1900 AllUsesAreOutsideLoop,
1901 Stride, ReuseIV, CommonExprs->getType(),
1903 if (isa<SCEVConstant>(RewriteFactor) &&
1904 cast<SCEVConstant>(RewriteFactor)->isZero())
1905 PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1906 CommonBaseV, L, PreheaderRewriter);
    else
1908 PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1909 ReuseIV, PreInsertPt);
  }
1912 // Process all the users now, replacing their strided uses with
1913 // strength-reduced forms. This outer loop handles all bases, the inner
1914 // loop handles all users of a particular base.
1915 while (!UsersToProcess.empty()) {
1916 SCEVHandle Base = UsersToProcess.back().Base;
1917 Instruction *Inst = UsersToProcess.back().Inst;
1919 // Emit the code for Base into the preheader.
1920 Value *BaseV = PreheaderRewriter.expandCodeFor(Base, PreInsertPt);
1922 DOUT << " Examining uses with BASE ";
1923 DEBUG(WriteAsOperand(*DOUT, BaseV, /*PrintType=*/false));
1926 // If BaseV is a constant other than 0, make sure that it gets inserted into
1927 // the preheader, instead of being forward substituted into the uses. We do
1928 // this by forcing a BitCast (noop cast) to be inserted into the preheader
1930 if (Constant *C = dyn_cast<Constant>(BaseV)) {
1931 if (!C->isNullValue() && !fitsInAddressMode(Base, ReplacedTy,
1933 // We want this constant emitted into the preheader! This is just
1934 // using cast as a copy so BitCast (no-op cast) is appropriate
1935 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
                              PreInsertPt);
1940 // Emit the code to add the immediate offset to the Phi value, just before
1941 // the instructions that we identified as using this stride and base.
    do {
1943 // FIXME: Use emitted users to emit other users.
1944 BasedUser &User = UsersToProcess.back();
1946 DOUT << " Examining use ";
1947 DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
1948 /*PrintType=*/false));
1949 DOUT << " in Inst: " << *Inst;
1951 // If this instruction wants to use the post-incremented value, move it
1952 // after the post-inc and use its value instead of the PHI.
1953 Value *RewriteOp = User.Phi;
1954 if (User.isUseOfPostIncrementedValue) {
1955 RewriteOp = User.IncV;
1957 // If this user is in the loop, make sure it is the last thing in the
1958 // loop to ensure it is dominated by the increment.
1959 if (L->contains(User.Inst->getParent()))
1960 User.Inst->moveBefore(LatchBlock->getTerminator());
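      // The value we rewrite against may have a different type than the
      // expression being rewritten; the block below inserts a truncate, or a
      // bit-for-bit cast when the bit widths already match, so the types line
      // up before building RewriteExpr.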
1962 if (RewriteOp->getType() != ReplacedTy) {
1963 Instruction::CastOps opcode = Instruction::Trunc;
1964 if (ReplacedTy->getPrimitiveSizeInBits() ==
1965 RewriteOp->getType()->getPrimitiveSizeInBits())
1966 opcode = Instruction::BitCast;
1967 RewriteOp = SCEVExpander::InsertCastOfTo(opcode, RewriteOp, ReplacedTy);
1970 SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);
1972 // If we had to insert new instructions for RewriteOp, we have to
1973 // consider that they may not have been able to end up immediately
1974 // next to RewriteOp, because non-PHI instructions may never precede
1975 // PHI instructions in a block. In this case, remember where the last
1976 // instruction was inserted so that if we're replacing a different
1977 // PHI node, we can use the later point to expand the final
      // RewriteExpr.
1979 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1980 if (RewriteOp == User.Phi) NewBasePt = 0;
1982 // Clear the SCEVExpander's expression map so that we are guaranteed
1983 // to have the code emitted where we expect it.
      Rewriter.clear();
1986 // If we are reusing the iv, then it must be multiplied by a constant
1987 // factor to take advantage of the addressing mode scale component.
1988 if (!isa<SCEVConstant>(RewriteFactor) ||
1989 !cast<SCEVConstant>(RewriteFactor)->isZero()) {
1990 // If we're reusing an IV with a nonzero base (currently this happens
1991 // only when all reuses are outside the loop) subtract that base here.
1992 // The base has been used to initialize the PHI node but we don't want
1994 if (!ReuseIV.Base->isZero()) {
1995 SCEVHandle typedBase = ReuseIV.Base;
1996 if (RewriteExpr->getType()->getPrimitiveSizeInBits() !=
1997 ReuseIV.Base->getType()->getPrimitiveSizeInBits()) {
1998 // It's possible the original IV is a larger type than the new IV,
1999 // in which case we have to truncate the Base. We checked in
2000 // RequiresTypeConversion that this is valid.
2001 assert (RewriteExpr->getType()->getPrimitiveSizeInBits() <
2002 ReuseIV.Base->getType()->getPrimitiveSizeInBits() &&
2003 "Unexpected lengthening conversion!");
2004 typedBase = SE->getTruncateExpr(ReuseIV.Base,
2005 RewriteExpr->getType());
2007 RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
2010 // Multiply old variable, with base removed, by new scale factor.
2011 RewriteExpr = SE->getMulExpr(RewriteFactor,
                                     RewriteExpr);
2014 // The common base is emitted in the loop preheader. But since we
2015 // are reusing an IV, it has not been used to initialize the PHI node.
2016 // Add it to the expression used to rewrite the uses.
2017 // When this use is outside the loop, we earlier subtracted the
2018 // common base, and are adding it back here. Use the same expression
2019 // as before, rather than CommonBaseV, so DAGCombiner will zap it.
2020 if (!isa<ConstantInt>(CommonBaseV) ||
2021 !cast<ConstantInt>(CommonBaseV)->isZero()) {
2022 if (L->contains(User.Inst->getParent()))
2023 RewriteExpr = SE->getAddExpr(RewriteExpr,
2024 SE->getUnknown(CommonBaseV));
        else
2026 RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
      }
    }
2030 // Now that we know what we need to do, insert code before User for the
2031 // immediate and any loop-variant expressions.
2032 if (!isa<ConstantInt>(BaseV) || !cast<ConstantInt>(BaseV)->isZero())
2033 // Add BaseV to the PHI value if needed.
2034 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
2036 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
2040 // Mark old value we replaced as possibly dead, so that it is eliminated
2041 // if we just replaced the last use of that value.
2042 DeadInsts.push_back(cast<Instruction>(User.OperandValToReplace));
2044 UsersToProcess.pop_back();
2047 // If there are any more users to process with the same base, process them
2048 // now. We sorted by base above, so we just have to check the last elt.
2049 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
2050 // TODO: Next, find out which base index is the most common, pull it out.
2053 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
2054 // different starting values, into different PHIs.
2057 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
2058 /// set the IV user and stride information and return true, otherwise return
/// false.
2060 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
2061 const SCEVHandle *&CondStride) {
2062 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e && !CondUse;
       ++Stride) {
2064 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2065 IVUsesByStride.find(StrideOrder[Stride]);
2066 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2068 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2069 E = SI->second.Users.end(); UI != E; ++UI)
2070 if (UI->User == Cond) {
2071 // NOTE: we could handle setcc instructions with multiple uses here, but
2072 // InstCombine does it as well for simple uses, and it's not clear that it
2073 // occurs often enough in real life to be worth handling.
        CondUse = &*UI;
2075 CondStride = &SI->first;
2083 // Constant strides come first and are sorted by their absolute
2084 // values. If the absolute values are the same, positive strides come first.
2086 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
2087 struct StrideCompare {
2088 bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
2089 SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
2090 SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
2092 int64_t LV = LHSC->getValue()->getSExtValue();
2093 int64_t RV = RHSC->getValue()->getSExtValue();
2094 uint64_t ALV = (LV < 0) ? -LV : LV;
2095 uint64_t ARV = (RV < 0) ? -RV : RV;
2103 // If it's the same value but different type, sort by bit width so
2104 // that we emit larger induction variables before smaller
2105 // ones, letting the smaller be re-written in terms of larger ones.
2106 return RHS->getBitWidth() < LHS->getBitWidth();
2108 return LHSC && !RHSC;
2113 /// ChangeCompareStride - If a loop termination compare instruction is the
2114 /// only use of its stride, and the comparison is against a constant value,
2115 /// try to eliminate the stride by moving the compare instruction to another
2116 /// stride and change its constant operand accordingly. E.g.,
///
///   loop:
///     ...
///     v1 = v1 + 3
///     v2 = v2 + 1
///     if (v2 < 10) goto loop
///  =>
///   loop:
///     ...
///     v1 = v1 + 3
///     if (v1 < 30) goto loop
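///
/// The new compare constant is, in general, the old constant scaled by the
/// ratio of the two strides (here, hypothetically, 10 * 3/1 = 30), and the
/// transformation is only done when that scaled value does not overflow.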
2128 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
2129 IVStrideUse* &CondUse,
2130 const SCEVHandle* &CondStride) {
2131 if (StrideOrder.size() < 2 ||
2132 IVUsesByStride[*CondStride].Users.size() != 1)
    return Cond;
2134 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
2135 if (!SC) return Cond;
2137 ICmpInst::Predicate Predicate = Cond->getPredicate();
2138 int64_t CmpSSInt = SC->getValue()->getSExtValue();
2139 unsigned BitWidth = (*CondStride)->getBitWidth();
2140 uint64_t SignBit = 1ULL << (BitWidth-1);
2141 const Type *CmpTy = Cond->getOperand(0)->getType();
2142 const Type *NewCmpTy = NULL;
2143 unsigned TyBits = CmpTy->getPrimitiveSizeInBits();
2144 unsigned NewTyBits = 0;
2145 SCEVHandle *NewStride = NULL;
2146 Value *NewCmpLHS = NULL;
2147 Value *NewCmpRHS = NULL;
  int64_t Scale = 1;
2149 SCEVHandle NewOffset = SE->getIntegerSCEV(0, UIntPtrTy);
2150 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
2152 if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
2153 int64_t CmpVal = C->getValue().getSExtValue();
2155 // Check the stride constant and the comparison constant signs to detect
    // a sign mismatch, which we do not handle.
2157 if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
      return Cond;
2160 // Look for a suitable stride / iv as replacement.
2161 for (unsigned i = 0, e = StrideOrder.size(); i != e; ++i) {
2162 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2163 IVUsesByStride.find(StrideOrder[i]);
2164 if (!isa<SCEVConstant>(SI->first))
        continue;
2166 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
2167 if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
        continue;
2170 Scale = SSInt / CmpSSInt;
2171 int64_t NewCmpVal = CmpVal * Scale;
2172 APInt Mul = APInt(BitWidth, NewCmpVal);
2173 // Check for overflow.
2174 if (Mul.getSExtValue() != NewCmpVal)
        continue;
2177 // Watch out for overflow.
2178 if (ICmpInst::isSignedPredicate(Predicate) &&
2179 (CmpVal & SignBit) != (NewCmpVal & SignBit))
        continue;
2182 if (NewCmpVal == CmpVal)
        continue;
2184 // Pick the best IV to use, trying to avoid a cast.
2186 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2187 E = SI->second.Users.end(); UI != E; ++UI) {
2188 NewCmpLHS = UI->OperandValToReplace;
2189 if (NewCmpLHS->getType() == CmpTy)
          break;
      }
2195 NewCmpTy = NewCmpLHS->getType();
2196 NewTyBits = isa<PointerType>(NewCmpTy)
2197 ? UIntPtrTy->getPrimitiveSizeInBits()
2198 : NewCmpTy->getPrimitiveSizeInBits();
2199 if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
2200 // Check if it is possible to rewrite it using
2201 // an iv / stride of a smaller integer type.
2202 bool TruncOk = false;
2203 if (NewCmpTy->isInteger()) {
2204 unsigned Bits = NewTyBits;
2205 if (ICmpInst::isSignedPredicate(Predicate))
            --Bits;
2207 uint64_t Mask = (1ULL << Bits) - 1;
2208 if (((uint64_t)NewCmpVal & Mask) == (uint64_t)NewCmpVal)
            TruncOk = true;
        }
        if (!TruncOk)
          continue;
      }
2215 // Don't rewrite if the use offset is non-constant and the new type is
2216 // different from the old type.
2217 // FIXME: too conservative?
2218 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset))
        continue;
2221 bool AllUsesAreAddresses = true;
2222 bool AllUsesAreOutsideLoop = true;
2223 std::vector<BasedUser> UsersToProcess;
2224 SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
2225 AllUsesAreAddresses,
2226 AllUsesAreOutsideLoop,
                                              UsersToProcess);
2228 // Avoid rewriting the compare instruction with an iv of new stride
2229 // if it's likely the new stride uses will be rewritten using the
2230 // stride of the compare instruction.
2231 if (AllUsesAreAddresses &&
2232 ValidStride(!CommonExprs->isZero(), Scale, UsersToProcess))
        continue;
2235 // If scale is negative, use the swapped predicate unless it's testing
      // for equality.
2237 if (Scale < 0 && !Cond->isEquality())
2238 Predicate = ICmpInst::getSwappedPredicate(Predicate);
2240 NewStride = &StrideOrder[i];
2241 if (!isa<PointerType>(NewCmpTy))
2242 NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
      else {
2244 NewCmpRHS = ConstantInt::get(UIntPtrTy, NewCmpVal);
2245 NewCmpRHS = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
2246 NewCmpRHS, NewCmpTy);
      }
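      // Scale the condition's IV offset to match the new, larger stride. When
      // the bit widths match, the offset is multiplied by Scale as a SCEV;
      // otherwise it is folded into a constant of the new compare type.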
2248 NewOffset = TyBits == NewTyBits
2249 ? SE->getMulExpr(CondUse->Offset,
2250 SE->getConstant(ConstantInt::get(CmpTy, Scale)))
2251 : SE->getConstant(ConstantInt::get(NewCmpTy,
2252 cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
      break;
    }
2257 // Forgo this transformation if the increment happens to be
2258 // unfortunately positioned after the condition, and the condition
2259 // has multiple uses which prevent it from being moved immediately
2260 // before the branch. See
2261 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2262 // for an example of this situation.
2263 if (!Cond->hasOneUse()) {
2264 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2271 // Create a new compare instruction using new stride / iv.
2272 ICmpInst *OldCond = Cond;
2273 // Insert new compare instruction.
2274 Cond = new ICmpInst(Predicate, NewCmpLHS, NewCmpRHS,
2275 L->getHeader()->getName() + ".termcond",
2278 // Remove the old compare instruction. The old indvar is probably dead too.
2279 DeadInsts.push_back(cast<Instruction>(CondUse->OperandValToReplace));
2280 SE->deleteValueFromRecords(OldCond);
2281 OldCond->replaceAllUsesWith(Cond);
2282 OldCond->eraseFromParent();
2284 IVUsesByStride[*CondStride].Users.pop_back();
2285 IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewCmpLHS);
2286 CondUse = &IVUsesByStride[*NewStride].Users.back();
2287 CondStride = NewStride;
2294 /// OptimizeSMax - Rewrite the loop's terminating condition if it uses
2295 /// an smax computation.
2297 /// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     ...
///   } while (++i < n);
2305 /// where the comparison is signed, the trip count isn't just 'n', because
2306 /// 'n' could be negative. And unfortunately this can come up even for loops
2307 /// where the user didn't use a C do-while loop. For example, seemingly
2308 /// well-behaved top-test loops will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       ...
///     } while (++i < n);
///   }
2317 /// and then it's possible for subsequent optimization to obscure the if
2318 /// test in such a way that indvars can't find it.
2320 /// When indvars can't find the if test in loops like this, it creates a
2321 /// signed-max expression, which allows it to give the loop a canonical
2322 /// induction variable:
///
///   i = 0;
///   smax = n < 1 ? 1 : n;
///   do {
///     ...
///   } while (++i != smax);
2330 /// Canonical induction variables are necessary because the loop passes
2331 /// are designed around them. The most obvious example of this is the
2332 /// LoopInfo analysis, which doesn't remember trip count values. It
2333 /// expects to be able to rediscover the trip count each time it is
2334 /// needed, and it does this using a simple analysis that only succeeds if
2335 /// the loop has a canonical induction variable.
2337 /// However, when it comes time to generate code, the maximum operation
2338 /// can be quite costly, especially if it's inside of an outer loop.
2340 /// This function solves this problem by detecting this type of loop and
2341 /// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
2342 /// the instructions for the maximum computation.
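///
/// For example (illustrative only), a latch test of the form
///
///   %exitcond = icmp ne i32 %indvar.next, %smax
///
/// becomes
///
///   %scmp = icmp slt i32 %indvar.next, %n
///
/// after which the smax computation is dead and can be removed.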
2344 ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
2345 IVStrideUse* &CondUse) {
2346 // Check that the loop matches the pattern we're looking for.
2347 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2348 Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;
2351 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2352 if (!Sel || !Sel->hasOneUse()) return Cond;
2354 SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2355 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
2357 SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2359 // Add one to the backedge-taken count to get the trip count.
2360 SCEVHandle IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
2362 // Check for a max calculation that matches the pattern.
2363 SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
2364 if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;
2366 SCEVHandle SMaxLHS = SMax->getOperand(0);
2367 SCEVHandle SMaxRHS = SMax->getOperand(1);
2368 if (!SMaxLHS || SMaxLHS != One) return Cond;
2370 // Check the relevant induction variable for conformance to
  // the pattern.
2372 SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
2373 SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2374 if (!AR || !AR->isAffine() ||
2375 AR->getStart() != One ||
2376 AR->getStepRecurrence(*SE) != One)
    return Cond;
2379 // Check the right operand of the select, and remember it, as it will
2380 // be used in the new comparison instruction.
  Value *NewRHS = 0;
2382 if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
2383 NewRHS = Sel->getOperand(1);
2384 else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
2385 NewRHS = Sel->getOperand(2);
2386 if (!NewRHS) return Cond;
2388 // Ok, everything looks ok to change the condition into an SLT or SGE and
2389 // delete the max calculation.
  ICmpInst *NewCond =
2391 new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
                   CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
2394 Cond->getOperand(0), NewRHS, "scmp", Cond);
2396 // Delete the max calculation instructions.
2397 SE->deleteValueFromRecords(Cond);
2398 Cond->replaceAllUsesWith(NewCond);
2399 Cond->eraseFromParent();
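  // The select's condition is the compare that fed the smax; once the select
  // is gone it may be dead as well, so check for that and delete it below.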
2400 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2401 SE->deleteValueFromRecords(Sel);
2402 Sel->eraseFromParent();
2403 if (Cmp->use_empty()) {
2404 SE->deleteValueFromRecords(Cmp);
2405 Cmp->eraseFromParent();
2407 CondUse->User = NewCond;
  return NewCond;
}
2411 /// OptimizeShadowIV - If IV is used in an int-to-float cast
2412 /// inside the loop then try to eliminate the cast operation.
2413 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2415 SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2416 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;
2419 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e;
       ++Stride) {
2421 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2422 IVUsesByStride.find(StrideOrder[Stride]);
2423 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2424 if (!isa<SCEVConstant>(SI->first))
      continue;
2427 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2428 E = SI->second.Users.end(); UI != E; /* empty */) {
2429 std::vector<IVStrideUse>::iterator CandidateUI = UI;
      ++UI;
2431 Instruction *ShadowUse = CandidateUI->User;
2432 const Type *DestTy = NULL;
2434 /* If the shadow use is an int->float cast then insert a second IV
2435 to eliminate this cast.

           for (unsigned i = 0; i < n; ++i)
             foo((double)i);

         is transformed into

           double d = 0.0;
           for (unsigned i = 0; i < n; ++i, ++d)
             foo(d);
      */
2446 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->User))
2447 DestTy = UCast->getDestTy();
2448 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->User))
2449 DestTy = SCast->getDestTy();
2450 if (!DestTy) continue;
2453 /* If target does not support DestTy natively then do not apply
2454 this transformation. */
2455 MVT DVT = TLI->getValueType(DestTy);
2456 if (!TLI->isTypeLegal(DVT)) continue;
2459 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
      if (!PH) continue;
2461 if (PH->getNumIncomingValues() != 2) continue;
2463 const Type *SrcTy = PH->getType();
2464 int Mantissa = DestTy->getFPMantissaWidth();
2465 if (Mantissa == -1) continue;
2466 if ((int)TD->getTypeSizeInBits(SrcTy) > Mantissa)
        continue;
2469 unsigned Entry, Latch;
2470 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
        Entry = 0;
        Latch = 1;
      } else {
        Entry = 1;
        Latch = 0;
      }
2478 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2479 if (!Init) continue;
2480 ConstantFP *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2482 BinaryOperator *Incr =
2483 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2484 if (!Incr) continue;
2485 if (Incr->getOpcode() != Instruction::Add
2486 && Incr->getOpcode() != Instruction::Sub)
        continue;
2489 /* Initialize new IV, double d = 0.0 in above example. */
2490 ConstantInt *C = NULL;
2491 if (Incr->getOperand(0) == PH)
2492 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2493 else if (Incr->getOperand(1) == PH)
2494 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
      else
        continue;

      if (!C) continue;
2500 /* Add new PHINode. */
2501 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2503 /* create new increment. '++d' in above example. */
2504 ConstantFP *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2505 BinaryOperator *NewIncr =
2506 BinaryOperator::Create(Incr->getOpcode(),
2507 NewPH, CFP, "IV.S.next.", Incr);
2509 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2510 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2512 /* Remove cast operation */
2513 SE->deleteValueFromRecords(ShadowUse);
2514 ShadowUse->replaceAllUsesWith(NewPH);
2515 ShadowUse->eraseFromParent();
2516 SI->second.Users.erase(CandidateUI);
2523 // OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2524 // uses in the loop, look to see if we can eliminate some, in favor of using
2525 // common indvars for the different uses.
2526 void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2527 // TODO: implement optzns here.
2529 OptimizeShadowIV(L);
2531 // Finally, get the terminating condition for the loop if possible. If we
2532 // can, we want to change it to use a post-incremented version of its
2533 // induction variable, to allow coalescing the live ranges for the IV into
2534 // one register value.
2535 PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
2536 BasicBlock *Preheader = L->getLoopPreheader();
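  // The latch is whichever of the header PHI's two incoming blocks is not the
  // preheader.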
2537 BasicBlock *LatchBlock =
2538 SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
2539 BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
2540 if (!TermBr || TermBr->isUnconditional() ||
2541 !isa<ICmpInst>(TermBr->getCondition()))
    return;
2543 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2545 // Search IVUsesByStride to find Cond's IVUse if there is one.
2546 IVStrideUse *CondUse = 0;
2547 const SCEVHandle *CondStride = 0;
2549 if (!FindIVUserForCond(Cond, CondUse, CondStride))
2550 return; // setcc doesn't use the IV.
2552 // If the trip count is computed in terms of an smax (due to ScalarEvolution
2553 // being unable to find a sufficient guard, for example), change the loop
2554 // comparison to use SLT instead of NE.
2555 Cond = OptimizeSMax(L, Cond, CondUse);
2557 // If possible, change stride and operands of the compare instruction to
2558 // eliminate one stride.
2559 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2561 // It's possible for the setcc instruction to be anywhere in the loop, and
2562 // possible for it to have multiple users. If it is not immediately before
2563 // the latch block branch, move it.
2564 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2565 if (Cond->hasOneUse()) { // Condition has a single use, just move it.
2566 Cond->moveBefore(TermBr);
    } else {
2568 // Otherwise, clone the terminating condition and insert it into the loop end.
2569 Cond = cast<ICmpInst>(Cond->clone());
2570 Cond->setName(L->getHeader()->getName() + ".termcond");
2571 LatchBlock->getInstList().insert(TermBr, Cond);
2573 // Clone the IVUse, as the old use still exists!
2574 IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
2575 CondUse->OperandValToReplace);
2576 CondUse = &IVUsesByStride[*CondStride].Users.back();
2580 // If we get to here, we know that we can transform the setcc instruction to
2581 // use the post-incremented version of the IV, allowing us to coalesce the
2582 // live ranges for the IV correctly.
2583 CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
2584 CondUse->isUseOfPostIncrementedValue = true;
2588 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2590 LI = &getAnalysis<LoopInfo>();
2591 DT = &getAnalysis<DominatorTree>();
2592 SE = &getAnalysis<ScalarEvolution>();
2593 TD = &getAnalysis<TargetData>();
2594 UIntPtrTy = TD->getIntPtrType();
2597 // Find all uses of induction variables in this loop, and categorize
2598 // them by stride. Start by finding all of the PHI nodes in the header for
2599 // this loop. If they are induction variables, inspect their uses.
2600 SmallPtrSet<Instruction*,16> Processed; // Don't reprocess instructions.
2601 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
2602 AddUsersIfInteresting(I, L, Processed);
2604 if (!IVUsesByStride.empty()) {
2605 // Optimize induction variables. Some indvar uses can be transformed to use
2606 // strides that will be needed for other purposes. A common example of this
2607 // is the exit test for the loop, which can often be rewritten to use the
2608 // computation of some other indvar to decide when to terminate the loop.
    OptimizeIndvars(L);
2611 // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
2612 // doing computation in byte values, promote to 32-bit values if safe.
2614 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
2615 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2616 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2617 // Need to be careful that IV's are all the same type. Only works for
2618 // intptr_t indvars.
2620 // If we only have one stride, we can more aggressively eliminate some
    // things.
2622 bool HasOneStride = IVUsesByStride.size() == 1;
2625 DOUT << "\nLSR on \"" << L->getHeader()->getParent()->getNameStart()
2630 // IVsByStride keeps IVs for one particular loop.
2631 assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2633 // Sort the StrideOrder so we process larger strides first.
2634 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
2636 // Note: this processes each stride/type pair individually. All users
2637 // passed into StrengthReduceStridedIVUsers have the same type AND stride.
2638 // Also, note that we iterate over IVUsesByStride indirectly by using
2639 // StrideOrder. This extra layer of indirection makes the ordering of
2640 // strides deterministic - not dependent on map order.
2641 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
2642 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2643 IVUsesByStride.find(StrideOrder[Stride]);
2644 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2645 StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
2649 // We're done analyzing this loop; release all the state we built up for it.
2650 CastedPointers.clear();
2651 IVUsesByStride.clear();
2652 IVsByStride.clear();
2653 StrideOrder.clear();
2654 for (unsigned i=0; i<GEPlist.size(); i++)
2655 SE->deleteValueFromRecords(GEPlist[i]);
  GEPlist.clear();
2658 // Clean up after ourselves
2659 if (!DeadInsts.empty()) {
2660 DeleteTriviallyDeadInstructions();
2662 BasicBlock::iterator I = L->getHeader()->begin();
2663 while (PHINode *PN = dyn_cast<PHINode>(I++)) {
2664 // At this point, we know that we have killed one or more IV users.
2665 // It is worth checking to see if the canonical indvar is also
2666 // dead, so that we can remove it as well.
2668 // We can remove a PHI if it is on a cycle in the def-use graph
2669 // where each node in the cycle has degree one, i.e. only one use,
2670 // and is an instruction with no side effects.
2672 // FIXME: this needs to eliminate an induction variable even if it's being
2673 // compared against some value to decide loop termination.
2674 if (!PN->hasOneUse())
        continue;
2677 SmallPtrSet<PHINode *, 4> PHIs;
2678 for (Instruction *J = dyn_cast<Instruction>(*PN->use_begin());
2679 J && J->hasOneUse() && !J->mayWriteToMemory();
2680 J = dyn_cast<Instruction>(*J->use_begin())) {
2681 // If we find the original PHI, we've discovered a cycle.
        if (J == PN) {
2683 // Break the cycle and mark the PHI for deletion.
2684 SE->deleteValueFromRecords(PN);
2685 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
2686 DeadInsts.push_back(PN);
          break;
        }
2690 // If we find a PHI more than once, we're on a cycle that
2691 // won't prove fruitful.
2692 if (isa<PHINode>(J) && !PHIs.insert(cast<PHINode>(J)))
          break;
2696 DeleteTriviallyDeadInstructions();