//===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable. This is
// accomplished by creating a new Value to hold the initial value of the array
// access for the first iteration, and then creating a new GEP instruction in
// the loop to increment the value by the appropriate amount.
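//
// As an illustration (hypothetical IR, not taken from the sources), a loop
// that loads A[i] on every iteration:
//
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %addr = getelementptr i32* %A, i32 %i
//     %val = load i32* %addr
//     %i.next = add i32 %i, 1
//
// can be rewritten so that the address itself is the induction variable:
//
//     %addr = phi i32* [ %A, %entry ], [ %addr.next, %loop ]
//     %val = load i32* %addr
//     %addr.next = getelementptr i32* %addr, i32 1
//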
//===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "loop-reduce"
19 #include "llvm/Transforms/Scalar.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/IntrinsicInst.h"
23 #include "llvm/Type.h"
24 #include "llvm/DerivedTypes.h"
25 #include "llvm/Analysis/Dominators.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/LoopPass.h"
28 #include "llvm/Analysis/ScalarEvolutionExpander.h"
29 #include "llvm/Support/CFG.h"
30 #include "llvm/Support/GetElementPtrTypeIterator.h"
31 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Target/TargetData.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/Compiler.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Target/TargetLowering.h"
44 STATISTIC(NumReduced , "Number of GEPs strength reduced");
45 STATISTIC(NumInserted, "Number of PHIs inserted");
46 STATISTIC(NumVariable, "Number of PHIs with variable strides");
47 STATISTIC(NumEliminated, "Number of strides eliminated");
48 STATISTIC(NumShadow, "Number of Shadow IVs optimized");
50 static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
/// IVStrideUse - Keep track of one use of a strided induction variable, where
/// the stride is stored externally. The Offset member keeps track of the
/// offset from the IV, User is the actual user of the operand, and
/// 'OperandValToReplace' is the operand of the User that is the use.
struct VISIBILITY_HIDDEN IVStrideUse {
  SCEVHandle Offset;
  Instruction *User;
  Value *OperandValToReplace;

  // isUseOfPostIncrementedValue - True if this should use the
  // post-incremented version of this IV, not the preincremented version.
  // This can only be set in special cases, such as the terminating setcc
  // instruction for a loop or uses dominated by the loop.
  bool isUseOfPostIncrementedValue;

  IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
    : Offset(Offs), User(U), OperandValToReplace(O),
      isUseOfPostIncrementedValue(false) {}
};
/// IVUsersOfOneStride - This structure keeps track of all instructions that
/// have an operand that is based on the trip count multiplied by some stride.
/// The stride for all of these users is common and kept external to this
/// structure.
struct VISIBILITY_HIDDEN IVUsersOfOneStride {
  /// Users - Keep track of all of the users of this stride as well as the
  /// initial value and the operand that uses the IV.
  std::vector<IVStrideUse> Users;

  void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
    Users.push_back(IVStrideUse(Offset, User, Operand));
  }
};
/// IVExpr - This structure keeps track of one IV expression inserted during
/// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
/// well as the PHI node and increment value created for rewrite.
struct VISIBILITY_HIDDEN IVExpr {
  SCEVHandle Stride;
  SCEVHandle Base;
  PHINode   *PHI;
  Value     *IncV;

  IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi,
         Value *incv)
    : Stride(stride), Base(base), PHI(phi), IncV(incv) {}
};
/// IVsOfOneStride - This structure keeps track of all IV expressions inserted
/// during StrengthReduceStridedIVUsers for a particular stride of the IV.
struct VISIBILITY_HIDDEN IVsOfOneStride {
  std::vector<IVExpr> IVs;

  void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI,
             Value *IncV) {
    IVs.push_back(IVExpr(Stride, Base, PHI, IncV));
  }
};
class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const TargetData *TD;
  const Type *UIntPtrTy;
  bool Changed;

  /// IVUsesByStride - Keep track of all uses of induction variables that we
  /// are interested in. The key of the map is the stride of the access.
  std::map<SCEVHandle, IVUsersOfOneStride> IVUsesByStride;

  /// IVsByStride - Keep track of all IVs that have been inserted for a
  /// particular stride.
  std::map<SCEVHandle, IVsOfOneStride> IVsByStride;

  /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
  /// We use this to iterate over the IVUsesByStride collection without being
  /// dependent on random ordering of pointers in the process.
  SmallVector<SCEVHandle, 16> StrideOrder;

  /// GEPlist - A list of the GEP's that have been remembered in the SCEV
  /// data structures. SCEV does not know to update these when the operands
  /// of the GEP are changed, which means we cannot leave them live across
  /// loops.
  SmallVector<GetElementPtrInst *, 16> GEPlist;

  /// CastedPointers - As we need to cast values to uintptr_t, this keeps
  /// track of the casted version of each value. This is accessed by
  /// getCastedVersionOf.
  DenseMap<Value*, Value*> CastedPointers;

  /// DeadInsts - Keep track of instructions we may have made dead, so that
  /// we can remove them after we are done working.
  SmallVector<Instruction*, 16> DeadInsts;

  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *TLI;
public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
    LoopPass(&ID), TLI(tli) {
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    // We split critical edges, so we change the CFG. However, we do update
    // many analyses if they are around.
    AU.addPreservedID(LoopSimplifyID);
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominanceFrontier>();
    AU.addPreserved<DominatorTree>();

    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addRequired<TargetData>();
    AU.addRequired<ScalarEvolution>();
    AU.addPreserved<ScalarEvolution>();
  }
181 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
183 Value *getCastedVersionOf(Instruction::CastOps opcode, Value *V);
185 bool AddUsersIfInteresting(Instruction *I, Loop *L,
186 SmallPtrSet<Instruction*,16> &Processed);
187 SCEVHandle GetExpressionSCEV(Instruction *E);
188 ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
189 IVStrideUse* &CondUse,
190 const SCEVHandle* &CondStride);
191 void OptimizeIndvars(Loop *L);
  /// OptimizeShadowIV - If IV is used in an int-to-float cast
  /// inside the loop then try to eliminate the cast operation.
195 void OptimizeShadowIV(Loop *L);
197 /// OptimizeSMax - Rewrite the loop's terminating condition
198 /// if it uses an smax computation.
199 ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
200 IVStrideUse* &CondUse);
202 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
203 const SCEVHandle *&CondStride);
204 bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
205 SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
206 IVExpr&, const Type*,
207 const std::vector<BasedUser>& UsersToProcess);
208 bool ValidStride(bool, int64_t,
209 const std::vector<BasedUser>& UsersToProcess);
210 SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
211 IVUsersOfOneStride &Uses,
213 bool &AllUsesAreAddresses,
214 bool &AllUsesAreOutsideLoop,
215 std::vector<BasedUser> &UsersToProcess);
216 bool ShouldUseFullStrengthReductionMode(
217 const std::vector<BasedUser> &UsersToProcess,
219 bool AllUsesAreAddresses,
221 void PrepareToStrengthReduceFully(
222 std::vector<BasedUser> &UsersToProcess,
224 SCEVHandle CommonExprs,
226 SCEVExpander &PreheaderRewriter);
227 void PrepareToStrengthReduceFromSmallerStride(
228 std::vector<BasedUser> &UsersToProcess,
230 const IVExpr &ReuseIV,
231 Instruction *PreInsertPt);
232 void PrepareToStrengthReduceWithNewPhi(
233 std::vector<BasedUser> &UsersToProcess,
235 SCEVHandle CommonExprs,
238 SCEVExpander &PreheaderRewriter);
239 void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
240 IVUsersOfOneStride &Uses,
241 Loop *L, bool isOnlyStride);
  void DeleteTriviallyDeadInstructions();
};
} // end anonymous namespace

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");
Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
/// getCastedVersionOf - Return the specified value casted to uintptr_t. This
/// assumes that the Value* V is of integer or pointer type only.
///
Value *LoopStrengthReduce::getCastedVersionOf(Instruction::CastOps opcode,
                                              Value *V) {
  if (V->getType() == UIntPtrTy) return V;
  if (Constant *CB = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, CB, UIntPtrTy);

  Value *&New = CastedPointers[V];
  if (New) return New;

  New = SCEVExpander::InsertCastOfTo(opcode, V, UIntPtrTy);
  DeadInsts.push_back(cast<Instruction>(New));
  return New;
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  // Sort the deadinsts list so that we can trivially eliminate duplicates as we
  // go. The code below never adds a non-dead instruction to the worklist, but
  // callers may not be so careful.
  array_pod_sort(DeadInsts.begin(), DeadInsts.end());

  // Drop duplicate instructions and those with uses.
  for (unsigned i = 0, e = DeadInsts.size()-1; i < e; ++i) {
    Instruction *I = DeadInsts[i];
    if (!I->use_empty()) DeadInsts[i] = 0;
    while (i != e && DeadInsts[i+1] == I)
      DeadInsts[++i] = 0;
  }

  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.back();
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    SE->deleteValueFromRecords(I);

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}
/// GetExpressionSCEV - Compute and return the SCEV for the specified
/// instruction.
316 SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
  // Pointer to pointer bitcast instructions return the same value as their
  // operands.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(Exp)) {
    if (SE->hasSCEV(BCI) || !isa<Instruction>(BCI->getOperand(0)))
      return SE->getSCEV(BCI);
    SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)));
    SE->setSCEV(BCI, R);
    return R;
  }
  // Scalar Evolutions doesn't know how to compute SCEVs for GEP instructions.
  // If this is a GEP that SE doesn't know about, compute it now and insert it.
  // If this is not a GEP, or if we have already done this computation, just let
  // SE figure it out.
331 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Exp);
332 if (!GEP || SE->hasSCEV(GEP))
333 return SE->getSCEV(Exp);
  // Analyze all of the subscripts of this getelementptr instruction, looking
  // for uses that are determined by the trip count of the loop. First, skip
  // all operands that are not dependent on the IV.

  // Build up the base expression. Insert an LLVM cast of the pointer to
  // uintptr_t first.
341 SCEVHandle GEPVal = SE->getUnknown(
342 getCastedVersionOf(Instruction::PtrToInt, GEP->getOperand(0)));
344 gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
       i != e; ++i, ++GTI) {
    // If this is a use of a recurrence that we can analyze, and it comes before
    // Op does in the GEP operand list, we will handle this when we process this
    // operand.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      unsigned Idx = cast<ConstantInt>(*i)->getZExtValue();
      uint64_t Offset = SL->getElementOffset(Idx);
      GEPVal = SE->getAddExpr(GEPVal,
                              SE->getIntegerSCEV(Offset, UIntPtrTy));
    } else {
      unsigned GEPOpiBits =
        (*i)->getType()->getPrimitiveSizeInBits();
      unsigned IntPtrBits = UIntPtrTy->getPrimitiveSizeInBits();
      Instruction::CastOps opcode = (GEPOpiBits < IntPtrBits ?
          Instruction::SExt : (GEPOpiBits > IntPtrBits ? Instruction::Trunc :
            Instruction::BitCast));
      Value *OpVal = getCastedVersionOf(opcode, *i);
      SCEVHandle Idx = SE->getSCEV(OpVal);

      uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
      if (TypeSize != 1)
        Idx = SE->getMulExpr(Idx,
                             SE->getConstant(ConstantInt::get(UIntPtrTy,
                                                              TypeSize)));
      GEPVal = SE->getAddExpr(GEPVal, Idx);
    }
  }

  SE->setSCEV(GEP, GEPVal);
  GEPlist.push_back(GEP);
  return GEPVal;
}
381 /// containsAddRecFromDifferentLoop - Determine whether expression S involves a
382 /// subexpression that is an AddRec from a loop other than L. An outer loop
383 /// of L is OK, but not an inner loop nor a disjoint loop.
384 static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) {
385 // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
388 if (SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
389 for (unsigned int i=0; i< AE->getNumOperands(); i++)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // If newLoop is an outer loop of L, this is OK.
      if (!LoopInfoBase<BasicBlock>::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
414 if (SCEVTruncateExpr *TE = dyn_cast<SCEVTruncateExpr>(S))
415 return containsAddRecFromDifferentLoop(TE->getOperand(), L);
416 if (SCEVZeroExtendExpr *ZE = dyn_cast<SCEVZeroExtendExpr>(S))
417 return containsAddRecFromDifferentLoop(ZE->getOperand(), L);
  if (SCEVSignExtendExpr *SE = dyn_cast<SCEVSignExtendExpr>(S))
    return containsAddRecFromDifferentLoop(SE->getOperand(), L);
  return false;
}
423 /// getSCEVStartAndStride - Compute the start and stride of this expression,
424 /// returning false if the expression is not a start/stride pair, or true if it
425 /// is. The stride must be a loop invariant expression, but the start may be
426 /// a mix of loop invariant and loop variant expressions. The start cannot,
427 /// however, contain an AddRec from a different loop, unless that loop is an
428 /// outer loop of the current loop.
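///
/// For example (illustrative only): given the expression {(4 + %x),+,8}<L>,
/// this computes Start = (4 + %x) and Stride = 8; given plain %x, which has
/// no AddRec for L, it returns false.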
429 static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
430 SCEVHandle &Start, SCEVHandle &Stride,
431 ScalarEvolution *SE, DominatorTree *DT) {
432 SCEVHandle TheAddRec = Start; // Initialize to zero.
434 // If the outer level is an AddExpr, the operands are all start values except
435 // for a nested AddRecExpr.
  if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (SCEVAddRecExpr *AddRec =
             dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
        if (AddRec->getLoop() == L)
          TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
        else
          return false;  // Nested IV of some sort?
      } else {
        Start = SE->getAddExpr(Start, AE->getOperand(i));
      }
  } else if (isa<SCEVAddRecExpr>(SH)) {
    TheAddRec = SH;
  } else {
    return false;  // not analyzable.
  }
454 SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
455 if (!AddRec || AddRec->getLoop() != L) return false;
457 // FIXME: Generalize to non-affine IV's.
458 if (!AddRec->isAffine()) return false;
460 // If Start contains an SCEVAddRecExpr from a different loop, other than an
461 // outer loop of the current loop, reject it. SCEV has no concept of
462 // operating on one loop at a time so don't confuse it with such expressions.
463 if (containsAddRecFromDifferentLoop(AddRec->getOperand(0), L))
466 Start = SE->getAddExpr(Start, AddRec->getOperand(0));
  if (!isa<SCEVConstant>(AddRec->getOperand(1))) {
    // If stride is an instruction, make sure it dominates the loop preheader.
    // Otherwise we could end up with a use before def situation.
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!AddRec->getOperand(1)->dominates(Preheader, DT))
      return false;

    DOUT << "[" << L->getHeader()->getName()
         << "] Variable stride: " << *AddRec << "\n";
  }

  Stride = AddRec->getOperand(1);
  return true;
}
483 /// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
484 /// and now we need to decide whether the user should use the preinc or post-inc
485 /// value. If this user should use the post-inc version of the IV, return true.
487 /// Choosing wrong here can break dominance properties (if we choose to use the
488 /// post-inc value when we cannot) or it can end up adding extra live-ranges to
489 /// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
490 /// should use the post-inc value).
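///
/// For example (illustrative): a use of the IV in the loop's unique exit
/// block is dominated by the latch, so it can use the post-incremented value
/// and the pre-incremented value need not be kept live out of the loop.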
491 static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
492 Loop *L, DominatorTree *DT, Pass *P,
493 SmallVectorImpl<Instruction*> &DeadInsts){
494 // If the user is in the loop, use the preinc value.
495 if (L->contains(User->getParent())) return false;
497 BasicBlock *LatchBlock = L->getLoopLatch();
499 // Ok, the user is outside of the loop. If it is dominated by the latch
500 // block, use the post-inc value.
501 if (DT->dominates(LatchBlock, User->getParent()))
504 // There is one case we have to be careful of: PHI nodes. These little guys
505 // can live in blocks that do not dominate the latch block, but (since their
506 // uses occur in the predecessor block, not the block the PHI lives in) should
507 // still use the post-inc value. Check for this case now.
508 PHINode *PN = dyn_cast<PHINode>(User);
509 if (!PN) return false; // not a phi, not dominated by latch block.
511 // Look at all of the uses of IV by the PHI node. If any use corresponds to
512 // a block that is not dominated by the latch block, give up and use the
513 // preincremented value.
514 unsigned NumUses = 0;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      ++NumUses;
      if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
        return false;
    }
522 // Okay, all uses of IV by PN are in predecessor blocks that really are
523 // dominated by the latch block. Split the critical edges and use the
524 // post-incremented value.
525 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
526 if (PN->getIncomingValue(i) == IV) {
527 SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
      // Splitting the critical edge can reduce the number of entries in this
      // PHI.
      e = PN->getNumIncomingValues();
      if (--NumUses == 0) break;
    }
534 // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(User);

  return true;
}
540 /// isAddressUse - Returns true if the specified instruction is using the
541 /// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
568 /// AddUsersIfInteresting - Inspect the specified instruction. If it is a
569 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
570 /// return true. Otherwise, return false.
571 bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
572 SmallPtrSet<Instruction*,16> &Processed) {
573 if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
574 return false; // Void and FP expressions cannot be reduced.
575 if (!Processed.insert(I))
576 return true; // Instruction already handled.
578 // Get the symbolic expression for this instruction.
579 SCEVHandle ISE = GetExpressionSCEV(I);
580 if (isa<SCEVCouldNotCompute>(ISE)) return false;
582 // Get the start and stride for this expression.
583 SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
584 SCEVHandle Stride = Start;
585 if (!getSCEVStartAndStride(ISE, L, Start, Stride, SE, DT))
586 return false; // Non-reducible symbolic expression, bail out.
588 std::vector<Instruction *> IUsers;
589 // Collect all I uses now because IVUseShouldUsePostIncValue may
590 // invalidate use_iterator.
591 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
592 IUsers.push_back(cast<Instruction>(*UI));
594 for (unsigned iused_index = 0, iused_size = IUsers.size();
595 iused_index != iused_size; ++iused_index) {
597 Instruction *User = IUsers[iused_index];
599 // Do not infinitely recurse on PHI nodes.
600 if (isa<PHINode>(User) && Processed.count(User))
603 // Descend recursively, but not into PHI nodes outside the current loop.
604 // It's important to see the entire expression outside the loop to get
605 // choices that depend on addressing mode use right, although we won't
    // consider references outside the loop in all cases.
607 // If User is already in Processed, we don't want to recurse into it again,
608 // but do want to record a second reference in the same instruction.
609 bool AddUserToIVUsers = false;
610 if (LI->getLoopFor(User->getParent()) != L) {
611 if (isa<PHINode>(User) || Processed.count(User) ||
612 !AddUsersIfInteresting(User, L, Processed)) {
613 DOUT << "FOUND USER in other loop: " << *User
614 << " OF SCEV: " << *ISE << "\n";
        AddUserToIVUsers = true;
      }
    } else if (Processed.count(User) ||
618 !AddUsersIfInteresting(User, L, Processed)) {
619 DOUT << "FOUND USER: " << *User
620 << " OF SCEV: " << *ISE << "\n";
      AddUserToIVUsers = true;
    }

    if (AddUserToIVUsers) {
625 IVUsersOfOneStride &StrideUses = IVUsesByStride[Stride];
626 if (StrideUses.Users.empty()) // First occurrence of this stride?
627 StrideOrder.push_back(Stride);
629 // Okay, we found a user that we cannot reduce. Analyze the instruction
630 // and decide what to do with it. If we are a use inside of the loop, use
631 // the value before incrementation, otherwise use it after incrementation.
632 if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
633 // The value used will be incremented by the stride more than we are
634 // expecting, so subtract this off.
635 SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
636 StrideUses.addUser(NewStart, User, I);
637 StrideUses.Users.back().isUseOfPostIncrementedValue = true;
        DOUT << "   USING POSTINC SCEV, START=" << *NewStart << "\n";
      } else {
        StrideUses.addUser(Start, User, I);
      }
    }
  }
  return true;
}
648 /// BasedUser - For a particular base value, keep information about how we've
649 /// partitioned the expression so far.
651 /// SE - The current ScalarEvolution object.
654 /// Base - The Base value for the PHI node that needs to be inserted for
655 /// this use. As the use is processed, information gets moved from this
656 /// field to the Imm field (below). BasedUser values are sorted by this
660 /// Inst - The instruction using the induction variable.
663 /// OperandValToReplace - The operand value of Inst to replace with the
665 Value *OperandValToReplace;
667 /// Imm - The immediate value that should be added to the base immediately
668 /// before Inst, because it will be folded into the imm field of the
669 /// instruction. This is also sometimes used for loop-variant values that
670 /// must be added inside the loop.
673 /// Phi - The induction variable that performs the striding that
674 /// should be used for this user.
677 /// IncV - The post-incremented value of Phi.
680 // isUseOfPostIncrementedValue - True if this should use the
681 // post-incremented version of this IV, not the preincremented version.
682 // This can only be set in special cases, such as the terminating setcc
683 // instruction for a loop and uses outside the loop that are dominated by
685 bool isUseOfPostIncrementedValue;
687 BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
688 : SE(se), Base(IVSU.Offset), Inst(IVSU.User),
689 OperandValToReplace(IVSU.OperandValToReplace),
690 Imm(SE->getIntegerSCEV(0, Base->getType())),
691 isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue) {}
693 // Once we rewrite the code to insert the new IVs we want, update the
  // operands of Inst to use the new expression 'NewBase', with 'Imm' added
  // to it.
696 void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
697 Instruction *InsertPt,
698 SCEVExpander &Rewriter, Loop *L, Pass *P,
699 SmallVectorImpl<Instruction*> &DeadInsts);
701 Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
702 SCEVExpander &Rewriter,
703 Instruction *IP, Loop *L);
708 void BasedUser::dump() const {
709 cerr << " Base=" << *Base;
710 cerr << " Imm=" << *Imm;
711 cerr << " Inst: " << *Inst;
714 Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
715 SCEVExpander &Rewriter,
716 Instruction *IP, Loop *L) {
  // Figure out where we *really* want to insert this code. In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user; we'd rather pull it out of
  // as many loops as possible.
721 LoopInfo &LI = Rewriter.getLoopInfo();
722 Instruction *BaseInsertPt = IP;
724 // Figure out the most-nested loop that IP is in.
725 Loop *InsertLoop = LI.getLoopFor(IP->getParent());
727 // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
728 // the preheader of the outer-most loop where NewBase is not loop invariant.
729 if (L->contains(IP->getParent()))
730 while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
731 BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
732 InsertLoop = InsertLoop->getParentLoop();
735 Value *Base = Rewriter.expandCodeFor(NewBase, BaseInsertPt);
  // If there is no immediate value, skip the next part.
  if (Imm->isZero())
    return Base;
741 // If we are inserting the base and imm values in the same block, make sure to
742 // adjust the IP position if insertion reused a result.
743 if (IP == BaseInsertPt)
744 IP = Rewriter.getInsertionPoint();
746 // Always emit the immediate (if non-zero) into the same block as the user.
747 SCEVHandle NewValSCEV = SE->getAddExpr(SE->getUnknown(Base), Imm);
748 return Rewriter.expandCodeFor(NewValSCEV, IP);
753 // Once we rewrite the code to insert the new IVs we want, update the
754 // operands of Inst to use the new expression 'NewBase', with 'Imm' added
755 // to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
757 // the PHI that NewBase is computed from, or null otherwise.
759 void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
760 Instruction *NewBasePt,
761 SCEVExpander &Rewriter, Loop *L, Pass *P,
762 SmallVectorImpl<Instruction*> &DeadInsts){
763 if (!isa<PHINode>(Inst)) {
764 // By default, insert code at the user instruction.
765 BasicBlock::iterator InsertPt = Inst;
767 // However, if the Operand is itself an instruction, the (potentially
768 // complex) inserted code may be shared by many users. Because of this, we
769 // want to emit code for the computation of the operand right before its old
770 // computation. This is usually safe, because we obviously used to use the
771 // computation when it was computed in its current block. However, in some
772 // cases (e.g. use of a post-incremented induction variable) the NewBase
773 // value will be pinned to live somewhere after the original computation.
774 // In this case, we have to back off.
776 // If this is a use outside the loop (which means after, since it is based
777 // on a loop indvar) we use the post-incremented value, so that we don't
778 // artificially make the preinc value live out the bottom of the loop.
779 if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
789 Value *NewVal = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
790 // Adjust the type back to match the Inst. Note that we can't use InsertPt
791 // here because the SCEVExpander may have inserted the instructions after
792 // that point, in its efforts to avoid inserting redundant expressions.
793 if (isa<PointerType>(OperandValToReplace->getType())) {
794 NewVal = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
796 OperandValToReplace->getType());
798 // Replace the use of the operand Value with the new Phi we just created.
799 Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
801 DOUT << " Replacing with ";
802 DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
803 DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
807 // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
808 // expression into each operand block that uses it. Note that PHI nodes can
809 // have multiple entries for the same predecessor. We use a map to make sure
810 // that a PHI node only has a single Value* for each predecessor (which also
811 // prevents us from inserting duplicate code in some blocks).
812 DenseMap<BasicBlock*, Value*> InsertedCode;
813 PHINode *PN = cast<PHINode>(Inst);
814 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
815 if (PN->getIncomingValue(i) == OperandValToReplace) {
816 // If the original expression is outside the loop, put the replacement
817 // code in the same place as the original expression,
818 // which need not be an immediate predecessor of this PHI. This way we
819 // need only one copy of it even if it is referenced multiple times in
820 // the PHI. We don't do this when the original expression is inside the
821 // loop because multiple copies sometimes do useful sinking of code in
823 Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
824 if (L->contains(OldLoc->getParent())) {
825 // If this is a critical edge, split the edge so that we do not insert
826 // the code on all predecessor/successor paths. We do this unless this
827 // is the canonical backedge for this loop, as this can make some
828 // inserted code be in an illegal position.
829 BasicBlock *PHIPred = PN->getIncomingBlock(i);
830 if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
831 (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {
833 // First step, split the critical edge.
834 SplitCriticalEdge(PHIPred, PN->getParent(), P, false);
836 // Next step: move the basic block. In particular, if the PHI node
837 // is outside of the loop, and PredTI is in the loop, we want to
838 // move the block to be immediately before the PHI block, not
839 // immediately after PredTI.
840 if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
841 BasicBlock *NewBB = PN->getIncomingBlock(i);
842 NewBB->moveBefore(PN->getParent());
845 // Splitting the edge can reduce the number of PHI entries we have.
846 e = PN->getNumIncomingValues();
849 Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
851 // Insert the code into the end of the predecessor block.
852 Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
853 PN->getIncomingBlock(i)->getTerminator() :
854 OldLoc->getParent()->getTerminator();
855 Code = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
857 // Adjust the type back to match the PHI. Note that we can't use
858 // InsertPt here because the SCEVExpander may have inserted its
859 // instructions after that point, in its efforts to avoid inserting
860 // redundant expressions.
861 if (isa<PointerType>(PN->getType())) {
862 Code = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
867 DOUT << " Changing PHI use to ";
868 DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
869 DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
872 // Replace the use of the operand Value with the new Phi we just created.
873 PN->setIncomingValue(i, Code);
878 // PHI node might have become a constant value after SplitCriticalEdge.
879 DeadInsts.push_back(Inst);
883 /// fitsInAddressMode - Return true if V can be subsumed within an addressing
884 /// mode, and does not need to be put in a register first.
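///
/// For example (illustrative): on a target whose loads accept a [reg + imm]
/// form, a small constant offset such as 64 is typically subsumable, while a
/// loop-variant value or a very large constant typically is not.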
885 static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
886 const TargetLowering *TLI, bool HasBaseReg) {
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, UseTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }
  if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
      if (TLI && CE->getOpcode() == Instruction::PtrToInt) {
        Constant *Op0 = CE->getOperand(0);
        if (GlobalValue *GV = dyn_cast<GlobalValue>(Op0)) {
          TargetLowering::AddrMode AM;
          AM.BaseGV = GV;
          AM.HasBaseReg = HasBaseReg;
          return TLI->isLegalAddressingMode(AM, UseTy);
        }
      }
  return false;
}
914 /// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
915 /// loop varying to the Imm operand.
916 static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
917 Loop *L, ScalarEvolution *SE) {
918 if (Val->isLoopInvariant(L)) return; // Nothing to do.
920 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
921 std::vector<SCEVHandle> NewOps;
922 NewOps.reserve(SAE->getNumOperands());
924 for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
925 if (!SAE->getOperand(i)->isLoopInvariant(L)) {
926 // If this is a loop-variant expression, it must stay in the immediate
927 // field of the expression.
928 Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
930 NewOps.push_back(SAE->getOperand(i));
934 Val = SE->getIntegerSCEV(0, Val->getType());
936 Val = SE->getAddExpr(NewOps);
937 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
938 // Try to pull immediates out of the start value of nested addrec's.
939 SCEVHandle Start = SARE->getStart();
940 MoveLoopVariantsToImmediateField(Start, Imm, L, SE);
942 std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
944 Val = SE->getAddRecExpr(Ops, SARE->getLoop());
946 // Otherwise, all of Val is variant, move the whole thing over.
947 Imm = SE->getAddExpr(Imm, Val);
948 Val = SE->getIntegerSCEV(0, Val->getType());
953 /// MoveImmediateValues - Look at Val, and pull out any additions of constants
954 /// that can fit into the immediate field of instructions in the target.
955 /// Accumulate these immediate values into the Imm value.
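///
/// For example (illustrative): for an address use with Val = (%a + 12), if
/// the target can fold a 12-byte displacement into the addressing mode, this
/// leaves Val = %a and accumulates Imm = 12.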
956 static void MoveImmediateValues(const TargetLowering *TLI,
958 SCEVHandle &Val, SCEVHandle &Imm,
959 bool isAddress, Loop *L,
960 ScalarEvolution *SE) {
961 const Type *UseTy = User->getType();
962 if (StoreInst *SI = dyn_cast<StoreInst>(User))
963 UseTy = SI->getOperand(0)->getType();
965 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
966 std::vector<SCEVHandle> NewOps;
967 NewOps.reserve(SAE->getNumOperands());
969 for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
970 SCEVHandle NewOp = SAE->getOperand(i);
971 MoveImmediateValues(TLI, User, NewOp, Imm, isAddress, L, SE);
973 if (!NewOp->isLoopInvariant(L)) {
974 // If this is a loop-variant expression, it must stay in the immediate
975 // field of the expression.
976 Imm = SE->getAddExpr(Imm, NewOp);
978 NewOps.push_back(NewOp);
983 Val = SE->getIntegerSCEV(0, Val->getType());
985 Val = SE->getAddExpr(NewOps);
987 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
988 // Try to pull immediates out of the start value of nested addrec's.
989 SCEVHandle Start = SARE->getStart();
990 MoveImmediateValues(TLI, User, Start, Imm, isAddress, L, SE);
992 if (Start != SARE->getStart()) {
993 std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
995 Val = SE->getAddRecExpr(Ops, SARE->getLoop());
998 } else if (SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
999 // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
1000 if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
1001 SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
1003 SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
1004 SCEVHandle NewOp = SME->getOperand(1);
1005 MoveImmediateValues(TLI, User, NewOp, SubImm, isAddress, L, SE);
      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8". If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, UseTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, UseTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}
1038 /// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
1039 /// added together. This is used to reassociate common addition subexprs
1040 /// together for maximal sharing when rewriting bases.
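///
/// For example (illustrative): the expression {(%a + %b),+,4}<L> is split
/// into the subexpressions %a, %b and {0,+,4}<L>.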
1041 static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
1043 ScalarEvolution *SE) {
1044 if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
1045 for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
1046 SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
1047 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
1048 SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}
1066 // This is logically local to the following function, but C++ says we have
1067 // to make it file scope.
1068 struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
1070 /// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
1071 /// the Uses, removing any common subexpressions, except that if all such
1072 /// subexpressions can be folded into an addressing mode for all uses inside
1073 /// the loop (this case is referred to as "free" in comments herein) we do
1074 /// not remove anything. This looks for things like (a+b+c) and
1075 /// (a+c+d) and computes the common (a+c) subexpression. The common expression
1076 /// is *removed* from the Bases and returned.
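///
/// For example (illustrative): with in-loop bases (%a + %b + %c) and
/// (%a + %c + %d), the common part (%a + %c) is returned and the two bases
/// are reduced to %b and %d respectively.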
1078 RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
1079 ScalarEvolution *SE, Loop *L,
1080 const TargetLowering *TLI) {
1081 unsigned NumUses = Uses.size();
  // Only one use? This is a very common case, so we handle it specially and
  // cheaply.
  SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  SCEVHandle Result = Zero;
  SCEVHandle FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's. If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }
1098 // To find common subexpressions, count how many of Uses use each expression.
1099 // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into
  // an addressing mode "for free"; such expressions are left within the loop.
1102 // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
1103 std::map<SCEVHandle, SubExprUseData> SubExpressionUseData;
1105 // UniqueSubExprs - Keep track of all of the subexpressions we see in the
1106 // order we see them.
1107 std::vector<SCEVHandle> UniqueSubExprs;
1109 std::vector<SCEVHandle> SubExprs;
1110 unsigned NumUsesInsideLoop = 0;
1111 for (unsigned i = 0; i != NumUses; ++i) {
1112 // If the user is outside the loop, just ignore it for base computation.
1113 // Since the user is outside the loop, it must be *after* the loop (if it
1114 // were before, it could not be based on the loop IV). We don't want users
1115 // after the loop to affect base computation of values *inside* the loop,
1116 // because we can always add their offsets to the result IV after the loop
1117 // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;
1122 // If the base is zero (which is common), return zero now, there are no
1123 // CSEs we can find.
1124 if (Uses[i].Base == Zero) return Zero;
1126 // If this use is as an address we may be able to put CSEs in the addressing
1127 // mode rather than hoisting them.
1128 bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
1129 // We may need the UseTy below, but only when isAddrUse, so compute it
1130 // only in that case.
    const Type *UseTy = 0;
    if (isAddrUse) {
      UseTy = Uses[i].Inst->getType();
      if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
        UseTy = SI->getOperand(0)->getType();
    }
1138 // Split the expression into subexprs.
1139 SeparateSubExprs(SubExprs, Uses[i].Base, SE);
1140 // Add one to SubExpressionUseData.Count for each subexpr present, and
1141 // if the subexpr is not a valid immediate within an addressing mode use,
1142 // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
1143 // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], UseTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }
1153 // Now that we know how many times each is used, build Result. Iterate over
1154 // UniqueSubexprs so that we have a stable ordering.
1155 for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
1156 std::map<SCEVHandle, SubExprUseData>::iterator I =
1157 SubExpressionUseData.find(UniqueSubExprs[i]);
1158 assert(I != SubExpressionUseData.end() && "Entry not found?");
1159 if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
1160 if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);

      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
    }
  }
1169 if (FreeResult != Zero) {
1170 // We have some subexpressions that can be subsumed into addressing
1171 // modes in every use inside the loop. However, it's possible that
1172 // there are so many of them that the combined FreeResult cannot
1173 // be subsumed, or that the target cannot handle both a FreeResult
1174 // and a Result in the same instruction (for example because it would
1175 // require too many registers). Check this.
1176 for (unsigned i=0; i<NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
1179 // We know this is an addressing mode use; if there are any uses that
1180 // are not, FreeResult would be Zero.
1181 const Type *UseTy = Uses[i].Inst->getType();
1182 if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
1183 UseTy = SI->getOperand(0)->getType();
      if (!fitsInAddressMode(FreeResult, UseTy, TLI, Result!=Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not. There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }
1194 // If we found no CSE's, return now.
1195 if (Result == Zero) return Result;
1197 // If we still have a FreeResult, remove its subexpressions from
1198 // SubExpressionUseData. This means they will remain in the use Bases.
1199 if (FreeResult != Zero) {
1200 SeparateSubExprs(SubExprs, FreeResult, SE);
1201 for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
1202 std::map<SCEVHandle, SubExprUseData>::iterator I =
1203 SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }
1209 // Otherwise, remove all of the CSE's we found from each of the base values.
1210 for (unsigned i = 0; i != NumUses; ++i) {
1211 // Uses outside the loop don't necessarily include the common base, but
1212 // the final IV value coming into those uses does. Instead of trying to
1213 // remove the pieces of the common base, which might not be there,
1214 // subtract off the base to compensate for this.
1215 if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }
1220 // Split the expression into subexprs.
1221 SeparateSubExprs(SubExprs, Uses[i].Base, SE);
1223 // Remove any common subexpressions.
1224 for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
1225 if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }
1230 // Finally, add the non-shared expressions together.
1231 if (SubExprs.empty())
1232 Uses[i].Base = Zero;
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
1241 /// ValidStride - Check whether the given Scale is valid for all loads and
1242 /// stores in UsersToProcess.
1244 bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
1246 const std::vector<BasedUser>& UsersToProcess) {
1250 for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
1251 // If this is a load or other access, pass the type of the access in.
1252 const Type *AccessTy = Type::VoidTy;
1253 if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].Inst))
1254 AccessTy = SI->getOperand(0)->getType();
1255 else if (LoadInst *LI = dyn_cast<LoadInst>(UsersToProcess[i].Inst))
1256 AccessTy = LI->getType();
1257 else if (isa<PHINode>(UsersToProcess[i].Inst))
1260 TargetLowering::AddrMode AM;
1261 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
1262 AM.BaseOffs = SC->getValue()->getSExtValue();
1263 AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
1266 // If load[imm+r*scale] is illegal, bail out.
1267 if (!TLI->isLegalAddressingMode(AM, AccessTy))
1273 /// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
1275 bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
1279 if (Ty1->canLosslesslyBitCastTo(Ty2))
1281 if (TLI && TLI->isTruncateFree(Ty1, Ty2))
1283 if (isa<PointerType>(Ty2) && Ty1->canLosslesslyBitCastTo(UIntPtrTy))
1285 if (isa<PointerType>(Ty1) && Ty2->canLosslesslyBitCastTo(UIntPtrTy))
1290 /// CheckForIVReuse - Returns the multiple if the stride is the multiple
1291 /// of a previous stride and it is a legal value for the target addressing
1292 /// mode scale component and optional base reg. This allows the users of
1293 /// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible. Factors can be negative on some targets, e.g. ARM.
1296 /// If all uses are outside the loop, we don't require that all multiplies
1297 /// be folded into the addressing mode, nor even that the factor be constant;
1298 /// a multiply (executed once) outside the loop is better than another IV
1299 /// within. Well, usually.
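///
/// For example (illustrative): if an IV with constant stride 4 already
/// exists and the current stride is 8, this can return a factor of 2 when
/// the target supports a reg + reg*2 addressing mode for all of the uses.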
1300 SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
1301 bool AllUsesAreAddresses,
1302 bool AllUsesAreOutsideLoop,
1303 const SCEVHandle &Stride,
1304 IVExpr &IV, const Type *Ty,
1305 const std::vector<BasedUser>& UsersToProcess) {
1306 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
1307 int64_t SInt = SC->getValue()->getSExtValue();
1308 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1310 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1311 IVsByStride.find(StrideOrder[NewStride]);
1312 if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
1314 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1315 if (SI->first != Stride &&
1316 (unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
1318 int64_t Scale = SInt / SSInt;
1319 // Check that this stride is valid for all the types used for loads and
1320 // stores; if it can be used for some and not others, we might as well use
1321 // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidStride(HasBaseReg, Scale, UsersToProcess)))
1327 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1328 IE = SI->second.IVs.end(); II != IE; ++II)
1329 // FIXME: Only handle base == 0 for now.
1330 // Only reuse previous IV if it would not require a type conversion.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
1337 } else if (AllUsesAreOutsideLoop) {
1338 // Accept nonconstant strides here; it is really really right to substitute
1339 // an existing IV if we can.
1340 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1342 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1343 IVsByStride.find(StrideOrder[NewStride]);
1344 if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
1346 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1347 if (SI->first != Stride && SSInt != 1)
1349 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1350 IE = SI->second.IVs.end(); II != IE; ++II)
1351 // Accept nonzero base here.
1352 // Only reuse previous IV if it would not require a type conversion.
1353 if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1358 // Special case, old IV is -1*x and this one is x. Can treat this one as
1360 for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
1362 std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
1363 IVsByStride.find(StrideOrder[NewStride]);
1364 if (SI == IVsByStride.end())
1366 if (SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
1367 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
1368 if (Stride == ME->getOperand(1) &&
1369 SC->getValue()->getSExtValue() == -1LL)
1370 for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1371 IE = SI->second.IVs.end(); II != IE; ++II)
1372 // Accept nonzero base here.
1373 // Only reuse previous IV if it would not require type conversion.
1374 if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1376 return SE->getIntegerSCEV(-1LL, Stride->getType());
1380 return SE->getIntegerSCEV(0, Stride->getType());
1383 /// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
1384 /// returns true if Val's isUseOfPostIncrementedValue is true.
1385 static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
1386 return Val.isUseOfPostIncrementedValue;
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
1392 SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
1393 if (!Mul) return false;
1395 // If there is a constant factor, it will be first.
1396 SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
1397 if (!SC) return false;
1399 // Return true if the value is negative, this matches things like (-42 * V).
1400 return SC->getValue()->getValue().isNegative();
1403 // CollectIVUsers - Transform our list of users and offsets to a bit more
1404 // complex table. In this new vector, each 'BasedUser' contains 'Base', the base
1405 // of the strided accesses, as well as the old information from Uses. We
1406 // progressively move information from the Base field to the Imm field, until
1407 // we eventually have the full access expression to rewrite the use.
1408 SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
1409 IVUsersOfOneStride &Uses,
1411 bool &AllUsesAreAddresses,
1412 bool &AllUsesAreOutsideLoop,
1413 std::vector<BasedUser> &UsersToProcess) {
1414 UsersToProcess.reserve(Uses.Users.size());
1415 for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i) {
1416 UsersToProcess.push_back(BasedUser(Uses.Users[i], SE));
1418 // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
1421 MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
1422 UsersToProcess.back().Imm, L, SE);
1423 assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
1424 "Base value is not loop invariant!");
1427 // We now have a whole bunch of uses of like-strided induction variables, but
1428 // they might all have different bases. We want to emit one PHI node for this
1429 // stride which we fold as many common expressions (between the IVs) into as
1430 // possible. Start by identifying the common expressions in the base values
1431 // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
1432 // "A+B"), emit it to the preheader, then remove the expression from the
1433 // UsersToProcess base values.
1434 SCEVHandle CommonExprs =
1435 RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);
1437 // Next, figure out what we can represent in the immediate fields of
1438 // instructions. If we can represent anything there, move it to the imm
1439 // fields of the BasedUsers. We do this so that it increases the commonality
1440 // of the remaining uses.
1441 unsigned NumPHI = 0;
1442 bool HasAddress = false;
1443 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1444 // If the user is not in the current loop, this means it is using the exit
1445 // value of the IV. Do not put anything in the base, make sure it's all in
1446 // the immediate field to allow as much factoring as possible.
1447 if (!L->contains(UsersToProcess[i].Inst->getParent())) {
1448 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
1449 UsersToProcess[i].Base);
1450 UsersToProcess[i].Base =
1451 SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
1453 // Addressing modes can be folded into loads and stores. Be careful that
1454 // the store is through the expression, not of the expression though.
1456 bool isAddress = isAddressUse(UsersToProcess[i].Inst,
1457 UsersToProcess[i].OperandValToReplace);
1458 if (isa<PHINode>(UsersToProcess[i].Inst)) {
1463 // Not all uses are outside the loop.
1464 AllUsesAreOutsideLoop = false;
1469 // If this use isn't an address, then not all uses are addresses.
1470 if (!isAddress && !isPHI)
1471 AllUsesAreAddresses = false;
1473 MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
1474 UsersToProcess[i].Imm, isAddress, L, SE);
  // If one of the uses is a PHI node and all other uses are addresses, still
1479 // allow iv reuse. Essentially we are trading one constant multiplication
1480 // for one fewer iv.
1482 AllUsesAreAddresses = false;
1484 // There are no in-loop address uses.
1485 if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}
1491 /// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
1492 /// is valid and profitable for the given set of users of a stride. In
1493 /// full strength-reduction mode, all addresses at the current stride are
1494 /// strength-reduced all the way down to pointer arithmetic.
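///
/// For example (illustrative): instead of keeping one IV and computing each
/// address as IV*Stride + Base, full mode gives every distinct base its own
/// pointer-sized IV that is simply incremented by the stride each iteration.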
1496 bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
1497 const std::vector<BasedUser> &UsersToProcess,
1499 bool AllUsesAreAddresses,
1500 SCEVHandle Stride) {
1501 if (!EnableFullLSRMode)
return false;
1504 // The heuristics below aim to avoid increasing register pressure, but
1505 // fully strength-reducing all the addresses increases the number of
1506 // add instructions, so don't do this when optimizing for size.
1507 // TODO: If the loop is large, the savings due to simpler addresses
1508 // may outweigh the costs of the extra increment instructions.
1509 if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
return false;
1512 // TODO: For now, don't do full strength reduction if there could
1513 // potentially be greater-stride multiples of the current stride
1514 // which could reuse the current stride IV.
1515 if (StrideOrder.back() != Stride)
return false;

1518 // Iterate through the uses to find conditions that automatically rule out
// full strength-reduction.
1520 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1521 SCEV *Base = UsersToProcess[i].Base;
1522 SCEV *Imm = UsersToProcess[i].Imm;
1523 // If any users have a loop-variant component, they can't be fully
1524 // strength-reduced.
1525 if (Imm && !Imm->isLoopInvariant(L))
return false;
1527 // If there are two users with the same base and the difference between
1528 // the two Imm values can't be folded into the address, full
1529 // strength reduction would increase register pressure.
do {
1531 SCEV *CurImm = UsersToProcess[i].Imm;
1532 if (CurImm || (Imm && CurImm != Imm)) {
1533 if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
1534 if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
1535 const Instruction *Inst = UsersToProcess[i].Inst;
1536 const Type *UseTy = Inst->getType();
1537 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
1538 UseTy = SI->getOperand(0)->getType();
1539 SCEVHandle Diff = SE->getMinusSCEV(CurImm, Imm);
1540 if (!Diff->isZero() &&
1541 (!AllUsesAreAddresses ||
1542 !fitsInAddressMode(Diff, UseTy, TLI, /*HasBaseReg=*/true)))
return false;
}
1545 } while (++i != e && Base == UsersToProcess[i].Base);
}
1548 // If there's exactly one user in this stride, fully strength-reducing it
1549 // won't increase register pressure. If it's starting from a non-zero base,
1550 // it'll be simpler this way.
1551 if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
return true;
1554 // Otherwise, if there are any users in this stride that don't require
1555 // a register for their base, full strength-reduction will increase
1556 // register pressure.
1557 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1558 if (UsersToProcess[i].Base->isZero())
return false;

1561 // Otherwise, go for it.
return true;
}
1565 /// InsertAffinePhi - Create and insert a PHI node for an induction variable
1566 /// with the specified start and step values in the specified loop.
1568 /// If NegateStride is true, the stride should be negated by using a
1569 /// subtract instead of an add.
1571 /// Return the created phi node, and return the step instruction by
1572 /// reference in IncV.
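///
/// A sketch of the IR this helper produces (the value names match the code
/// below; the exact operands depend on Start and Step):
///
/// header:
/// %lsr.iv = phi [ <start>, %preheader ], [ %lsr.iv.next, %backedge ]
/// ...
/// %lsr.iv.next = add %lsr.iv, <step> ; a sub is used if Step is negative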
1574 static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
Loop *L,
1576 SCEVExpander &Rewriter,
Instruction *&IncV) {
1578 assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
1579 assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
1581 BasicBlock *Header = L->getHeader();
1582 BasicBlock *Preheader = L->getLoopPreheader();
1584 PHINode *PN = PHINode::Create(Start->getType(), "lsr.iv", Header->begin());
1585 PN->addIncoming(Rewriter.expandCodeFor(Start, Preheader->getTerminator()),
Preheader);
1588 pred_iterator HPI = pred_begin(Header);
1589 assert(HPI != pred_end(Header) && "Loop with zero preds???");
1590 if (!L->contains(*HPI)) ++HPI;
1591 assert(HPI != pred_end(Header) && L->contains(*HPI) &&
1592 "No backedge in loop?");
1594 // If the stride is negative, insert a sub instead of an add for the
// increment.
1596 bool isNegative = isNonConstantNegative(Step);
1597 SCEVHandle IncAmount = Step;
if (isNegative)
1599 IncAmount = Rewriter.SE.getNegativeSCEV(Step);
1601 // Insert an add instruction right before the terminator corresponding
1602 // to the back-edge.
1603 Value *StepV = Rewriter.expandCodeFor(IncAmount, Preheader->getTerminator());
if (isNegative)
1605 IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
1606 (*HPI)->getTerminator());
else
1608 IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
1609 (*HPI)->getTerminator());
1611 if (!isa<ConstantInt>(StepV)) ++NumVariable;
1613 pred_iterator PI = pred_begin(Header);
1614 if (*PI == L->getLoopPreheader())
++PI;
1616 PN->addIncoming(IncV, *PI);

return PN;
}
1622 static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
1623 // We want to emit code for users inside the loop first. To do this, we
1624 // rearrange BasedUser so that the entries at the end have
1625 // isUseOfPostIncrementedValue = false, because we pop off the end of the
1626 // vector (so we handle them first).
1627 std::partition(UsersToProcess.begin(), UsersToProcess.end(),
1628 PartitionByIsUseOfPostIncrementedValue);
1630 // Sort this by base, so that things with the same base are handled
1631 // together. By partitioning first and stable-sorting later, we are
1632 // guaranteed that within each base we will pop off users from within the
1633 // loop before users outside of the loop with a particular base.
1635 // We would like to use stable_sort here, but we can't. The problem is that
1636 // SCEVHandle's don't have a deterministic ordering w.r.t to each other, so
1637 // we don't have anything to do a '<' comparison on. Because we think the
1638 // number of uses is small, do a horrible bubble sort which just relies on
// comparing the Base values for equality to group users with the same base.
1640 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1641 // Get a base value.
1642 SCEVHandle Base = UsersToProcess[i].Base;
1644 // Compact everything with this base to be consecutive with this one.
1645 for (unsigned j = i+1; j != e; ++j) {
1646 if (UsersToProcess[j].Base == Base) {
1647 std::swap(UsersToProcess[i+1], UsersToProcess[j]);
1654 /// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
1655 /// UsersToProcess, meaning lowering addresses all the way down to direct
1656 /// pointer arithmetic.
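///
/// A sketch (illustrative): if the users have distinct bases B0 and B1, two
/// PHIs are created, starting at CommonExprs+B0+Imm0 and CommonExprs+B1+Imm1
/// respectively, and each user is then rewritten against its group's PHI with
/// a zero Base and an Imm adjusted relative to the group's first user.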
1659 LoopStrengthReduce::PrepareToStrengthReduceFully(
1660 std::vector<BasedUser> &UsersToProcess,
1662 SCEVHandle CommonExprs,
1664 SCEVExpander &PreheaderRewriter) {
1665 DOUT << " Fully reducing all users\n";
1667 // Rewrite the UsersToProcess records, creating a separate PHI for each
1668 // unique Base value.
1669 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1670 // TODO: The uses are grouped by base, but not sorted. We arbitrarily
1671 // pick the first Imm value here to start with, and adjust it for the
// rest.
1673 SCEVHandle Imm = UsersToProcess[i].Imm;
1674 SCEVHandle Base = UsersToProcess[i].Base;
1675 SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
Instruction *IncV;
1677 PHINode *Phi = InsertAffinePhi(Start, Stride, L,
PreheaderRewriter, IncV);

1680 // Loop over all the users with the same base.
do {
1682 UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
1683 UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1684 UsersToProcess[i].Phi = Phi;
1685 UsersToProcess[i].IncV = IncV;
1686 assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
1687 "ShouldUseFullStrengthReductionMode should reject this!");
1688 } while (++i != e && Base == UsersToProcess[i].Base);
1692 /// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
1693 /// given users to share.
1696 LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
1697 std::vector<BasedUser> &UsersToProcess,
1699 SCEVHandle CommonExprs,
1702 SCEVExpander &PreheaderRewriter) {
1703 DOUT << " Inserting new PHI:\n";
1706 PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
1711 // Remember this in case a later stride is multiple of this.
1712 IVsByStride[Stride].addIV(Stride, CommonExprs, Phi, IncV);
1714 // All the users will share this new IV.
1715 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1716 UsersToProcess[i].Phi = Phi;
1717 UsersToProcess[i].IncV = IncV;
1721 DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
1723 DEBUG(WriteAsOperand(*DOUT, IncV, /*PrintType=*/false));
1727 /// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to reuse
1728 /// an induction variable with a stride that is a factor of the current
1729 /// induction variable.
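///
/// For example (illustrative only), a use with stride 8 can be rewritten in
/// terms of an existing stride-2 IV as "4 * iv" when the target addressing
/// mode can fold the scale factor of 4, avoiding a second induction variable.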
1732 LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
1733 std::vector<BasedUser> &UsersToProcess,
1735 const IVExpr &ReuseIV,
1736 Instruction *PreInsertPt) {
1737 DOUT << " Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
1738 << " and BASE " << *ReuseIV.Base << "\n";
1740 // All the users will share the reused IV.
1741 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1742 UsersToProcess[i].Phi = ReuseIV.PHI;
1743 UsersToProcess[i].IncV = ReuseIV.IncV;
1746 Constant *C = dyn_cast<Constant>(CommonBaseV);
1748 (!C->isNullValue() &&
1749 !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
1751 // We want the common base emitted into the preheader! This is just
1752 // using cast as a copy so BitCast (no-op cast) is appropriate
1753 CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1754 "commonbase", PreInsertPt);
1757 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
1758 /// stride of IV. All of the users may have different starting values, and this
1759 /// may not be the only stride (we know it is if isOnlyStride is true).
1760 void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
1761 IVUsersOfOneStride &Uses,
1763 bool isOnlyStride) {
1764 // If all the users are moved to another stride, then there is nothing to do.
1765 if (Uses.Users.empty())
1768 // Keep track if every use in UsersToProcess is an address. If they all are,
1769 // we may be able to rewrite the entire collection of them in terms of a
1770 // smaller-stride IV.
1771 bool AllUsesAreAddresses = true;
1773 // Keep track if every use of a single stride is outside the loop. If so,
1774 // we want to be more aggressive about reusing a smaller-stride IV; a
1775 // multiply outside the loop is better than another IV inside. Well, usually.
1776 bool AllUsesAreOutsideLoop = true;
1778 // Transform our list of users and offsets to a bit more complex table. In
1779 // this new vector, each 'BasedUser' contains 'Base' the base of the
1780 // strided access as well as the old information from Uses. We progressively
1781 // move information from the Base field to the Imm field, until we eventually
1782 // have the full access expression to rewrite the use.
1783 std::vector<BasedUser> UsersToProcess;
1784 SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1785 AllUsesAreOutsideLoop,
UsersToProcess);
1788 // Sort the UsersToProcess array so that users with common bases are
1789 // next to each other.
1790 SortUsersToProcess(UsersToProcess);
1792 // If we managed to find some expressions in common, we'll need to carry
1793 // their value in a register and add it in for each use. This will take up
1794 // a register operand, which potentially restricts what stride values are
1796 bool HaveCommonExprs = !CommonExprs->isZero();
1798 const Type *ReplacedTy = CommonExprs->getType();
1800 // Now that we know what we need to do, insert the PHI node itself.
1802 DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1804 << " Common base: " << *CommonExprs << "\n";
1806 SCEVExpander Rewriter(*SE, *LI);
1807 SCEVExpander PreheaderRewriter(*SE, *LI);
1809 BasicBlock *Preheader = L->getLoopPreheader();
1810 Instruction *PreInsertPt = Preheader->getTerminator();
1811 BasicBlock *LatchBlock = L->getLoopLatch();
1813 Value *CommonBaseV = ConstantInt::get(ReplacedTy, 0);
1815 SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1816 IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
1817 SE->getIntegerSCEV(0, Type::Int32Ty),
1820 /// Choose a strength-reduction strategy and prepare for it by creating
1821 /// the necessary PHIs and adjusting the bookkeeping.
1822 if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1823 AllUsesAreAddresses, Stride)) {
1824 PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
PreheaderRewriter);
} else {
1827 // Emit the initial base value into the loop preheader.
1828 CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, PreInsertPt);
1830 // If all uses are addresses, check whether it is possible to reuse an IV
1831 // with a stride that is a factor of this stride, whether the multiple can
1832 // be encoded in the scale field of the target addressing mode, and whether
1833 // we will still have a valid instruction after this substitution,
1834 // including the immediate field, if any.
1835 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1836 AllUsesAreOutsideLoop,
1837 Stride, ReuseIV, CommonExprs->getType(),
1839 if (isa<SCEVConstant>(RewriteFactor) &&
1840 cast<SCEVConstant>(RewriteFactor)->isZero())
1841 PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1842 CommonBaseV, L, PreheaderRewriter);
else
1844 PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1845 ReuseIV, PreInsertPt);
}
1848 // Process all the users now, replacing their strided uses with
1849 // strength-reduced forms. This outer loop handles all bases, the inner
1850 // loop handles all users of a particular base.
1851 while (!UsersToProcess.empty()) {
1852 SCEVHandle Base = UsersToProcess.back().Base;
1853 Instruction *Inst = UsersToProcess.back().Inst;
1855 // Emit the code for Base into the preheader.
1856 Value *BaseV = PreheaderRewriter.expandCodeFor(Base, PreInsertPt);
1858 DOUT << " Examining uses with BASE ";
1859 DEBUG(WriteAsOperand(*DOUT, BaseV, /*PrintType=*/false));
1862 // If BaseV is a constant other than 0, make sure that it gets inserted into
1863 // the preheader, instead of being forward substituted into the uses. We do
1864 // this by forcing a BitCast (noop cast) to be inserted into the preheader
1866 if (Constant *C = dyn_cast<Constant>(BaseV)) {
1867 if (!C->isNullValue() && !fitsInAddressMode(Base, ReplacedTy,
1869 // We want this constant emitted into the preheader! This is just
1870 // using cast as a copy so BitCast (no-op cast) is appropriate
1871 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1876 // Emit the code to add the immediate offset to the Phi value, just before
1877 // the instructions that we identified as using this stride and base.
do {
1879 // FIXME: Use emitted users to emit other users.
1880 BasedUser &User = UsersToProcess.back();
1882 DOUT << " Examining use ";
1883 DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
1884 /*PrintType=*/false));
1885 DOUT << " in Inst: " << *Inst;
1887 // If this instruction wants to use the post-incremented value, move it
1888 // after the post-inc and use its value instead of the PHI.
1889 Value *RewriteOp = User.Phi;
1890 if (User.isUseOfPostIncrementedValue) {
1891 RewriteOp = User.IncV;
1893 // If this user is in the loop, make sure it is the last thing in the
1894 // loop to ensure it is dominated by the increment.
1895 if (L->contains(User.Inst->getParent()))
1896 User.Inst->moveBefore(LatchBlock->getTerminator());
1898 if (RewriteOp->getType() != ReplacedTy) {
1899 Instruction::CastOps opcode = Instruction::Trunc;
1900 if (ReplacedTy->getPrimitiveSizeInBits() ==
1901 RewriteOp->getType()->getPrimitiveSizeInBits())
1902 opcode = Instruction::BitCast;
1903 RewriteOp = SCEVExpander::InsertCastOfTo(opcode, RewriteOp, ReplacedTy);
1906 SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);
1908 // If we had to insert new instructions for RewriteOp, we have to
1909 // consider that they may not have been able to end up immediately
1910 // next to RewriteOp, because non-PHI instructions may never precede
1911 // PHI instructions in a block. In this case, remember where the last
1912 // instruction was inserted so that if we're replacing a different
1913 // PHI node, we can use the later point to expand the final
1915 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1916 if (RewriteOp == User.Phi) NewBasePt = 0;
1918 // Clear the SCEVExpander's expression map so that we are guaranteed
1919 // to have the code emitted where we expect it.
1922 // If we are reusing the iv, then it must be multiplied by a constant
1923 // factor to take advantage of the addressing mode scale component.
1924 if (!isa<SCEVConstant>(RewriteFactor) ||
1925 !cast<SCEVConstant>(RewriteFactor)->isZero()) {
1926 // If we're reusing an IV with a nonzero base (currently this happens
1927 // only when all reuses are outside the loop) subtract that base here.
1928 // The base has been used to initialize the PHI node but we don't want
// to add it in again here.
1930 if (!ReuseIV.Base->isZero()) {
1931 SCEVHandle typedBase = ReuseIV.Base;
1932 if (RewriteExpr->getType()->getPrimitiveSizeInBits() !=
1933 ReuseIV.Base->getType()->getPrimitiveSizeInBits()) {
1934 // It's possible the original IV is a larger type than the new IV,
1935 // in which case we have to truncate the Base. We checked in
1936 // RequiresTypeConversion that this is valid.
1937 assert (RewriteExpr->getType()->getPrimitiveSizeInBits() <
1938 ReuseIV.Base->getType()->getPrimitiveSizeInBits() &&
1939 "Unexpected lengthening conversion!");
1940 typedBase = SE->getTruncateExpr(ReuseIV.Base,
1941 RewriteExpr->getType());
1943 RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
1946 // Multiply old variable, with base removed, by new scale factor.
1947 RewriteExpr = SE->getMulExpr(RewriteFactor,
RewriteExpr);
1950 // The common base is emitted in the loop preheader. But since we
1951 // are reusing an IV, it has not been used to initialize the PHI node.
1952 // Add it to the expression used to rewrite the uses.
1953 // When this use is outside the loop, we earlier subtracted the
1954 // common base, and are adding it back here. Use the same expression
1955 // as before, rather than CommonBaseV, so DAGCombiner will zap it.
1956 if (!isa<ConstantInt>(CommonBaseV) ||
1957 !cast<ConstantInt>(CommonBaseV)->isZero()) {
1958 if (L->contains(User.Inst->getParent()))
1959 RewriteExpr = SE->getAddExpr(RewriteExpr,
1960 SE->getUnknown(CommonBaseV));
1962 RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
1966 // Now that we know what we need to do, insert code before User for the
1967 // immediate and any loop-variant expressions.
1968 if (!isa<ConstantInt>(BaseV) || !cast<ConstantInt>(BaseV)->isZero())
1969 // Add BaseV to the PHI value if needed.
1970 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
1972 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1976 // Mark old value we replaced as possibly dead, so that it is eliminated
1977 // if we just replaced the last use of that value.
1978 DeadInsts.push_back(cast<Instruction>(User.OperandValToReplace));
1980 UsersToProcess.pop_back();
1983 // If there are any more users to process with the same base, process them
1984 // now. We sorted by base above, so we just have to check the last elt.
1985 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1986 // TODO: Next, find out which base index is the most common, pull it out.
1989 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1990 // different starting values, into different PHIs.
1993 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1994 /// set the IV user and stride information and return true, otherwise return
/// false.
1996 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
1997 const SCEVHandle *&CondStride) {
1998 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e && !CondUse;
++Stride) {
2000 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2001 IVUsesByStride.find(StrideOrder[Stride]);
2002 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2004 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2005 E = SI->second.Users.end(); UI != E; ++UI)
2006 if (UI->User == Cond) {
2007 // NOTE: we could handle setcc instructions with multiple uses here, but
2008 // InstCombine does it as well for simple uses; it's not clear that it
2009 // occurs often enough in real life to be worth handling.
CondUse = &*UI;
2011 CondStride = &SI->first;
2019 // Constant strides come first, which in turn are sorted by their absolute
2020 // values. If absolute values are the same, then positive strides come first.
2022 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
2023 struct StrideCompare {
2024 bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
2025 SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
2026 SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
2028 int64_t LV = LHSC->getValue()->getSExtValue();
2029 int64_t RV = RHSC->getValue()->getSExtValue();
2030 uint64_t ALV = (LV < 0) ? -LV : LV;
2031 uint64_t ARV = (RV < 0) ? -RV : RV;
2039 // If it's the same value but different type, sort by bit width so
2040 // that we emit larger induction variables before smaller
2041 // ones, letting the smaller be re-written in terms of larger ones.
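// For example, an i64 stride of 4 sorts before an i32 stride of 4, so the
// wider IV is emitted first and the narrower uses can later be rewritten as
// a truncation of it.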
2042 return RHS->getBitWidth() < LHS->getBitWidth();
2044 return LHSC && !RHSC;
2049 /// ChangeCompareStride - If a loop termination compare instruction is the
2050 /// only use of its stride, and the comparison is against a constant value,
2051 /// try to eliminate the stride by moving the compare instruction to another
2052 /// stride and change its constant operand accordingly. e.g.
///
/// loop:
/// ...
/// v1 = v1 + 3
/// v2 = v2 + 1
2058 /// if (v2 < 10) goto loop
/// =>
/// loop:
/// ...
/// v1 = v1 + 3
2063 /// if (v1 < 30) goto loop
2064 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
2065 IVStrideUse* &CondUse,
2066 const SCEVHandle* &CondStride) {
2067 if (StrideOrder.size() < 2 ||
2068 IVUsesByStride[*CondStride].Users.size() != 1)
return Cond;
2070 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
2071 if (!SC) return Cond;
2072 ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1));
2073 if (!C) return Cond;
2075 ICmpInst::Predicate Predicate = Cond->getPredicate();
2076 int64_t CmpSSInt = SC->getValue()->getSExtValue();
2077 int64_t CmpVal = C->getValue().getSExtValue();
2078 unsigned BitWidth = C->getValue().getBitWidth();
2079 uint64_t SignBit = 1ULL << (BitWidth-1);
2080 const Type *CmpTy = C->getType();
2081 const Type *NewCmpTy = NULL;
2082 unsigned TyBits = CmpTy->getPrimitiveSizeInBits();
2083 unsigned NewTyBits = 0;
2084 SCEVHandle *NewStride = NULL;
2085 Value *NewCmpLHS = NULL;
2086 Value *NewCmpRHS = NULL;
int64_t Scale = 0;
2089 // Check the stride constant and the comparison constant signs to detect
// sign mismatches, which are not handled.
2091 if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
return Cond;
2094 // Look for a suitable stride / iv as replacement.
2095 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
2096 for (unsigned i = 0, e = StrideOrder.size(); i != e; ++i) {
2097 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2098 IVUsesByStride.find(StrideOrder[i]);
2099 if (!isa<SCEVConstant>(SI->first))
2101 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
2102 if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
continue;
2105 Scale = SSInt / CmpSSInt;
2106 int64_t NewCmpVal = CmpVal * Scale;
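// With the example from the function comment (stride 3 replacing stride 1),
// Scale is 3 and the compare constant 10 is rescaled to 30.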
2107 APInt Mul = APInt(BitWidth, NewCmpVal);
2108 // Check for overflow.
2109 if (Mul.getSExtValue() != NewCmpVal)
continue;

2112 // Watch out for overflow.
2113 if (ICmpInst::isSignedPredicate(Predicate) &&
2114 (CmpVal & SignBit) != (NewCmpVal & SignBit))
continue;

2117 if (NewCmpVal == CmpVal)
continue;
2119 // Pick the best iv to use trying to avoid a cast.
2121 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2122 E = SI->second.Users.end(); UI != E; ++UI) {
2123 NewCmpLHS = UI->OperandValToReplace;
2124 if (NewCmpLHS->getType() == CmpTy)
2130 NewCmpTy = NewCmpLHS->getType();
2131 NewTyBits = isa<PointerType>(NewCmpTy)
2132 ? UIntPtrTy->getPrimitiveSizeInBits()
2133 : NewCmpTy->getPrimitiveSizeInBits();
2134 if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
2135 // Check if it is possible to rewrite it using
2136 // an iv / stride of a smaller integer type.
2137 bool TruncOk = false;
2138 if (NewCmpTy->isInteger()) {
2139 unsigned Bits = NewTyBits;
2140 if (ICmpInst::isSignedPredicate(Predicate))
--Bits;
2142 uint64_t Mask = (1ULL << Bits) - 1;
2143 if (((uint64_t)NewCmpVal & Mask) == (uint64_t)NewCmpVal)
TruncOk = true;
2150 // Don't rewrite if the use offset is non-constant and the new type
2151 // differs from the old type.
2152 // FIXME: too conservative?
2153 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset))
continue;
2156 bool AllUsesAreAddresses = true;
2157 bool AllUsesAreOutsideLoop = true;
2158 std::vector<BasedUser> UsersToProcess;
2159 SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
2160 AllUsesAreAddresses,
2161 AllUsesAreOutsideLoop,
UsersToProcess);
2163 // Avoid rewriting the compare instruction with an iv of new stride
2164 // if it's likely the new stride uses will be rewritten using the
2165 // stride of the compare instruction.
2166 if (AllUsesAreAddresses &&
2167 ValidStride(!CommonExprs->isZero(), Scale, UsersToProcess))
continue;
2170 // If scale is negative, use swapped predicate unless it's testing
// for equality.
2172 if (Scale < 0 && !Cond->isEquality())
2173 Predicate = ICmpInst::getSwappedPredicate(Predicate);
2175 NewStride = &StrideOrder[i];
2176 if (!isa<PointerType>(NewCmpTy))
2177 NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
else {
2179 NewCmpRHS = ConstantInt::get(UIntPtrTy, NewCmpVal);
2180 NewCmpRHS = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
2181 NewCmpRHS, NewCmpTy);
}
break;
}
2186 // Forgo this transformation if the increment happens to be
2187 // unfortunately positioned after the condition, and the condition
2188 // has multiple uses which prevent it from being moved immediately
2189 // before the branch. See
2190 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2191 // for an example of this situation.
2192 if (!Cond->hasOneUse()) {
2193 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2200 // Create a new compare instruction using new stride / iv.
2201 ICmpInst *OldCond = Cond;
2202 // Insert new compare instruction.
2203 Cond = new ICmpInst(Predicate, NewCmpLHS, NewCmpRHS,
2204 L->getHeader()->getName() + ".termcond",
OldCond);
2207 // Remove the old compare instruction. The old indvar is probably dead too.
2208 DeadInsts.push_back(cast<Instruction>(CondUse->OperandValToReplace));
2209 SE->deleteValueFromRecords(OldCond);
2210 OldCond->replaceAllUsesWith(Cond);
2211 OldCond->eraseFromParent();
2213 IVUsesByStride[*CondStride].Users.pop_back();
2214 SCEVHandle NewOffset = TyBits == NewTyBits
2215 ? SE->getMulExpr(CondUse->Offset,
2216 SE->getConstant(ConstantInt::get(CmpTy, Scale)))
2217 : SE->getConstant(ConstantInt::get(NewCmpTy,
2218 cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
2219 IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewCmpLHS);
2220 CondUse = &IVUsesByStride[*NewStride].Users.back();
2221 CondStride = NewStride;
2228 /// OptimizeSMax - Rewrite the loop's terminating condition if it uses
2229 /// an smax computation.
2231 /// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
/// i = 0;
/// do {
/// p[i] = 0.0;
2237 /// } while (++i < n);
///
2239 /// where the comparison is signed, the trip count isn't just 'n', because
2240 /// 'n' could be negative. And unfortunately this can come up even for loops
2241 /// where the user didn't use a C do-while loop. For example, seemingly
2242 /// well-behaved top-test loops will commonly be lowered like this:
///
/// if (n > 0) {
/// i = 0;
/// do {
/// p[i] = 0.0;
2248 /// } while (++i < n);
/// }
///
2251 /// and then it's possible for subsequent optimization to obscure the if
2252 /// test in such a way that indvars can't find it.
///
2254 /// When indvars can't find the if test in loops like this, it creates a
2255 /// signed-max expression, which allows it to give the loop a canonical
2256 /// induction variable:
///
/// i = 0;
2259 /// smax = n < 1 ? 1 : n;
/// do {
/// p[i] = 0.0;
2262 /// } while (++i != smax);
///
2264 /// Canonical induction variables are necessary because the loop passes
2265 /// are designed around them. The most obvious example of this is the
2266 /// LoopInfo analysis, which doesn't remember trip count values. It
2267 /// expects to be able to rediscover the trip count each time it is
2268 /// needed, and it does this using a simple analysis that only succeeds if
2269 /// the loop has a canonical induction variable.
2271 /// However, when it comes time to generate code, the maximum operation
2272 /// can be quite costly, especially if it's inside of an outer loop.
2274 /// This function solves this problem by detecting this type of loop and
2275 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2276 /// the instructions for the maximum computation.
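///
/// A sketch of the rewrite (illustrative; the value names are made up): the
/// exit test
///
/// %exitcond = icmp ne i32 %i.next, %smax
///
/// becomes
///
/// %exitcond = icmp slt i32 %i.next, %n
///
/// and the now-dead icmp/select pair that computed %smax is deleted.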
2278 ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
2279 IVStrideUse* &CondUse) {
2280 // Check that the loop matches the pattern we're looking for.
2281 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2282 Cond->getPredicate() != CmpInst::ICMP_NE)
return Cond;
2285 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2286 if (!Sel || !Sel->hasOneUse()) return Cond;
2288 SCEVHandle IterationCount = SE->getIterationCount(L);
2289 if (isa<SCEVCouldNotCompute>(IterationCount))
return Cond;
2291 SCEVHandle One = SE->getIntegerSCEV(1, IterationCount->getType());
2293 // Adjust for an annoying getIterationCount quirk.
2294 IterationCount = SE->getAddExpr(IterationCount, One);
2296 // Check for a max calculation that matches the pattern.
2297 SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
2298 if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;
2300 SCEVHandle SMaxLHS = SMax->getOperand(0);
2301 SCEVHandle SMaxRHS = SMax->getOperand(1);
2302 if (!SMaxLHS || SMaxLHS != One) return Cond;
2304 // Check the relevant induction variable for conformance to
// the pattern.
2306 SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
2307 SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2308 if (!AR || !AR->isAffine() ||
2309 AR->getStart() != One ||
2310 AR->getStepRecurrence(*SE) != One)
return Cond;
2313 // Check the right operand of the select, and remember it, as it will
2314 // be used in the new comparison instruction.
Value *NewRHS = 0;
2316 if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
2317 NewRHS = Sel->getOperand(1);
2318 else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
2319 NewRHS = Sel->getOperand(2);
2320 if (!NewRHS) return Cond;
2322 // Ok, everything looks ok to change the condition into an SLT or SGE and
2323 // delete the max calculation.
ICmpInst *NewCond =
2325 new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
2328 Cond->getOperand(0), NewRHS, "scmp", Cond);
2330 // Delete the max calculation instructions.
2331 SE->deleteValueFromRecords(Cond);
2332 Cond->replaceAllUsesWith(NewCond);
2333 Cond->eraseFromParent();
2334 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2335 SE->deleteValueFromRecords(Sel);
2336 Sel->eraseFromParent();
2337 if (Cmp->use_empty()) {
2338 SE->deleteValueFromRecords(Cmp);
2339 Cmp->eraseFromParent();
2341 CondUse->User = NewCond;
return NewCond;
}
2345 /// OptimizeShadowIV - If IV is used in an int-to-float cast
2346 /// inside the loop then try to eliminate the cast operation.
2347 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2349 SCEVHandle IterationCount = SE->getIterationCount(L);
2350 if (isa<SCEVCouldNotCompute>(IterationCount))
2353 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e;
++Stride) {
2355 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2356 IVUsesByStride.find(StrideOrder[Stride]);
2357 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2358 if (!isa<SCEVConstant>(SI->first))
2361 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2362 E = SI->second.Users.end(); UI != E; /* empty */) {
2363 std::vector<IVStrideUse>::iterator CandidateUI = UI;
++UI;
2365 Instruction *ShadowUse = CandidateUI->User;
2366 const Type *DestTy = NULL;
2368 /* If shadow use is an int->float cast then insert a second IV
2369 to eliminate this cast.

2371 for (unsigned i = 0; i < n; ++i)
foo((double)i);

is transformed into

double d = 0.0;
2377 for (unsigned i = 0; i < n; ++i, ++d)
foo(d);
*/
2380 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->User))
2381 DestTy = UCast->getDestTy();
2382 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->User))
2383 DestTy = SCast->getDestTy();
2384 if (!DestTy) continue;
2387 /* If target does not support DestTy natively then do not apply
2388 this transformation. */
2389 MVT DVT = TLI->getValueType(DestTy);
2390 if (!TLI->isTypeLegal(DVT)) continue;
2393 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
if (!PH) continue;
2395 if (PH->getNumIncomingValues() != 2) continue;
2397 const Type *SrcTy = PH->getType();
2398 int Mantissa = DestTy->getFPMantissaWidth();
2399 if (Mantissa == -1) continue;
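/* For example, an i32 IV can be shadowed by a double (53-bit mantissa)
but not by a float (24-bit mantissa), since 32 > 24. */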
2400 if ((int)TD->getTypeSizeInBits(SrcTy) > Mantissa)
continue;
2403 unsigned Entry, Latch;
2404 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
Entry = 0;
Latch = 1;
} else {
Entry = 1;
Latch = 0;
}
2412 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2413 if (!Init) continue;
2414 ConstantFP *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2416 BinaryOperator *Incr =
2417 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2418 if (!Incr) continue;
2419 if (Incr->getOpcode() != Instruction::Add
2420 && Incr->getOpcode() != Instruction::Sub)
continue;
2423 /* Initialize new IV, double d = 0.0 in above example. */
2424 ConstantInt *C = NULL;
2425 if (Incr->getOperand(0) == PH)
2426 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2427 else if (Incr->getOperand(1) == PH)
2428 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
else
continue;

if (!C) continue;
2434 /* Add new PHINode. */
2435 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2437 /* create new increment. '++d' in above example. */
2438 ConstantFP *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2439 BinaryOperator *NewIncr =
2440 BinaryOperator::Create(Incr->getOpcode(),
2441 NewPH, CFP, "IV.S.next.", Incr);
2443 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2444 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2446 /* Remove cast operation */
2447 SE->deleteValueFromRecords(ShadowUse);
2448 ShadowUse->replaceAllUsesWith(NewPH);
2449 ShadowUse->eraseFromParent();
2450 SI->second.Users.erase(CandidateUI);
2457 // OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2458 // uses in the loop, look to see if we can eliminate some, in favor of using
2459 // common indvars for the different uses.
2460 void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2461 // TODO: implement optzns here.
2463 OptimizeShadowIV(L);
2465 // Finally, get the terminating condition for the loop if possible. If we
2466 // can, we want to change it to use a post-incremented version of its
2467 // induction variable, to allow coalescing the live ranges for the IV into
2468 // one register value.
2469 PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
2470 BasicBlock *Preheader = L->getLoopPreheader();
2471 BasicBlock *LatchBlock =
2472 SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
2473 BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
2474 if (!TermBr || TermBr->isUnconditional() ||
2475 !isa<ICmpInst>(TermBr->getCondition()))
2477 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2479 // Search IVUsesByStride to find Cond's IVUse if there is one.
2480 IVStrideUse *CondUse = 0;
2481 const SCEVHandle *CondStride = 0;
2483 if (!FindIVUserForCond(Cond, CondUse, CondStride))
2484 return; // setcc doesn't use the IV.
2486 // If the trip count is computed in terms of an smax (due to ScalarEvolution
2487 // being unable to find a sufficient guard, for example), change the loop
2488 // comparison to use SLT instead of NE.
2489 Cond = OptimizeSMax(L, Cond, CondUse);
2491 // If possible, change stride and operands of the compare instruction to
2492 // eliminate one stride.
2493 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2495 // It's possible for the setcc instruction to be anywhere in the loop, and
2496 // possible for it to have multiple users. If it is not immediately before
2497 // the latch block branch, move it.
2498 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2499 if (Cond->hasOneUse()) { // Condition has a single use, just move it.
2500 Cond->moveBefore(TermBr);
} else {
2502 // Otherwise, clone the terminating condition and insert it into the loop end.
2503 Cond = cast<ICmpInst>(Cond->clone());
2504 Cond->setName(L->getHeader()->getName() + ".termcond");
2505 LatchBlock->getInstList().insert(TermBr, Cond);
2507 // Clone the IVUse, as the old use still exists!
2508 IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
2509 CondUse->OperandValToReplace);
2510 CondUse = &IVUsesByStride[*CondStride].Users.back();
2514 // If we get to here, we know that we can transform the setcc instruction to
2515 // use the post-incremented version of the IV, allowing us to coalesce the
2516 // live ranges for the IV correctly.
2517 CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
2518 CondUse->isUseOfPostIncrementedValue = true;
2522 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2524 LI = &getAnalysis<LoopInfo>();
2525 DT = &getAnalysis<DominatorTree>();
2526 SE = &getAnalysis<ScalarEvolution>();
2527 TD = &getAnalysis<TargetData>();
2528 UIntPtrTy = TD->getIntPtrType();
2531 // Find all uses of induction variables in this loop, and categorize
2532 // them by stride. Start by finding all of the PHI nodes in the header for
2533 // this loop. If they are induction variables, inspect their uses.
2534 SmallPtrSet<Instruction*,16> Processed; // Don't reprocess instructions.
2535 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
2536 AddUsersIfInteresting(I, L, Processed);
2538 if (!IVUsesByStride.empty()) {
2539 // Optimize induction variables. Some indvar uses can be transformed to use
2540 // strides that will be needed for other purposes. A common example of this
2541 // is the exit test for the loop, which can often be rewritten to use the
2542 // computation of some other indvar to decide when to terminate the loop.
2545 // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
2546 // doing computation in byte values, promote to 32-bit values if safe.
2548 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
2549 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2550 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2551 // Need to be careful that IV's are all the same type. Only works for
2552 // intptr_t indvars.
2554 // If we only have one stride, we can more aggressively eliminate some things.
2556 bool HasOneStride = IVUsesByStride.size() == 1;
2559 DOUT << "\nLSR on ";
2563 // IVsByStride keeps IVs for one particular loop.
2564 assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2566 // Sort the StrideOrder so we process larger strides first.
2567 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
2569 // Note: this processes each stride/type pair individually. All users
2570 // passed into StrengthReduceStridedIVUsers have the same type AND stride.
2571 // Also, note that we iterate over IVUsesByStride indirectly by using
2572 // StrideOrder. This extra layer of indirection makes the ordering of
2573 // strides deterministic - not dependent on map order.
2574 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
2575 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2576 IVUsesByStride.find(StrideOrder[Stride]);
2577 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2578 StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
2582 // We're done analyzing this loop; release all the state we built up for it.
2583 CastedPointers.clear();
2584 IVUsesByStride.clear();
2585 IVsByStride.clear();
2586 StrideOrder.clear();
2587 for (unsigned i=0; i<GEPlist.size(); i++)
2588 SE->deleteValueFromRecords(GEPlist[i]);
2591 // Clean up after ourselves
2592 if (!DeadInsts.empty()) {
2593 DeleteTriviallyDeadInstructions();
2595 BasicBlock::iterator I = L->getHeader()->begin();
2596 while (PHINode *PN = dyn_cast<PHINode>(I++)) {
2597 // At this point, we know that we have killed one or more IV users.
2598 // It is worth checking to see if the canonical indvar is also
2599 // dead, so that we can remove it as well.
2601 // We can remove a PHI if it is on a cycle in the def-use graph
2602 // where each node in the cycle has degree one, i.e. only one use,
2603 // and is an instruction with no side effects.
2605 // FIXME: this needs to eliminate an induction variable even if it's being
2606 // compared against some value to decide loop termination.
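// For example (illustrative), a dead canonical IV forms exactly such a cycle:
// %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
// %iv.next = add i32 %iv, 1 ; the add's only use is the phi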
2607 if (!PN->hasOneUse())
continue;
2610 SmallPtrSet<PHINode *, 4> PHIs;
2611 for (Instruction *J = dyn_cast<Instruction>(*PN->use_begin());
2612 J && J->hasOneUse() && !J->mayWriteToMemory();
2613 J = dyn_cast<Instruction>(*J->use_begin())) {
2614 // If we find the original PHI, we've discovered a cycle.
if (J == PN) {
2616 // Break the cycle and mark the PHI for deletion.
2617 SE->deleteValueFromRecords(PN);
2618 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
2619 DeadInsts.push_back(PN);
break;
}
2623 // If we find a PHI more than once, we're on a cycle that
2624 // won't prove fruitful.
2625 if (isa<PHINode>(J) && !PHIs.insert(cast<PHINode>(J)))
2629 DeleteTriviallyDeadInstructions();