//===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable.  This is
// accomplished by creating a new Value to hold the initial value of the array
// access for the first iteration, and then creating a new GEP instruction in
// the loop to increment the value by the appropriate amount.
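//
// For example (an illustrative sketch, not the pass's literal output), a loop
// such as:
//
//   for (i = 0; i < N; ++i)
//     A[i] = 0;
//
// is rewritten so that the address of A[i] is computed once before the loop
// and advanced by sizeof(A[0]) on each iteration, instead of being recomputed
// from A and i every time around the loop.
//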
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumReduced,    "Number of GEPs strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
namespace {

  struct BasedUser;

  /// IVStrideUse - Keep track of one use of a strided induction variable, where
  /// the stride is stored externally.  The Offset member keeps track of the
  /// offset from the IV, User is the actual user of the operand, and
  /// 'OperandValToReplace' is the operand of the User that is the use.
  struct VISIBILITY_HIDDEN IVStrideUse {
    SCEVHandle Offset;
    Instruction *User;
    Value *OperandValToReplace;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop or uses dominated by the loop.
    bool isUseOfPostIncrementedValue;

    IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
      : Offset(Offs), User(U), OperandValToReplace(O),
        isUseOfPostIncrementedValue(false) {}
  };
  /// IVUsersOfOneStride - This structure keeps track of all instructions that
  /// have an operand that is based on the trip count multiplied by some stride.
  /// The stride for all of these users is common and kept external to this
  /// structure.
  struct VISIBILITY_HIDDEN IVUsersOfOneStride {
    /// Users - Keep track of all of the users of this stride as well as the
    /// initial value and the operand that uses the IV.
    std::vector<IVStrideUse> Users;

    void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
      Users.push_back(IVStrideUse(Offset, User, Operand));
    }
  };
  /// IVInfo - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers.  It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct VISIBILITY_HIDDEN IVExpr {
    SCEVHandle  Stride;
    SCEVHandle  Base;
    PHINode    *PHI;
    Value      *IncV;

    IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi,
           Value *incv)
      : Stride(stride), Base(base), PHI(phi), IncV(incv) {}
  };
  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
  /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
  struct VISIBILITY_HIDDEN IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI,
               Value *IncV) {
      IVs.push_back(IVExpr(Stride, Base, PHI, IncV));
    }
  };
  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    const TargetData *TD;
    const Type *UIntPtrTy;
    bool Changed;

    /// IVUsesByStride - Keep track of all uses of induction variables that we
    /// are interested in.  The key of the map is the stride of the access.
    std::map<SCEVHandle, IVUsersOfOneStride> IVUsesByStride;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<SCEVHandle, IVsOfOneStride> IVsByStride;

    /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
    /// We use this to iterate over the IVUsesByStride collection without being
    /// dependent on random ordering of pointers in the process.
    SmallVector<SCEVHandle, 16> StrideOrder;

    /// CastedPointers - As we need to cast values to uintptr_t, this keeps
    /// track of the casted version of each value.  This is accessed by
    /// getCastedVersionOf.
    DenseMap<Value*, Value*> CastedPointers;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallPtrSet<Instruction*,16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass((intptr_t)&ID), TLI(tli) {
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG.  However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetData>();
      AU.addRequired<ScalarEvolution>();
    }
    /// getCastedVersionOf - Return the specified value casted to uintptr_t.
    ///
    Value *getCastedVersionOf(Instruction::CastOps opcode, Value *V);

  private:
    bool AddUsersIfInteresting(Instruction *I, Loop *L,
                               SmallPtrSet<Instruction*,16> &Processed);
    SCEVHandle GetExpressionSCEV(Instruction *E);
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEVHandle* &CondStride);
    void OptimizeIndvars(Loop *L);
    bool FindIVForUser(ICmpInst *Cond, IVStrideUse *&CondUse,
                       const SCEVHandle *&CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    unsigned CheckForIVReuse(bool, bool, const SCEVHandle&,
                             IVExpr&, const Type*,
                             const std::vector<BasedUser>& UsersToProcess);
    bool ValidStride(bool, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
                              IVUsersOfOneStride &Uses,
                              Loop *L,
                              bool &AllUsesAreAddresses,
                              std::vector<BasedUser> &UsersToProcess);
    void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L, bool isOnlyStride);
    void DeleteTriviallyDeadInstructions(SmallPtrSet<Instruction*,16> &Insts);
  };
}
char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");
LoopPass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
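// A typical way to schedule this pass from a target's pass pipeline setup
// (illustrative only; the exact call site varies by target):
//   PM.add(createLoopStrengthReducePass(getTargetLowering()));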
/// getCastedVersionOf - Return the specified value casted to uintptr_t. This
/// assumes that the Value* V is of integer or pointer type only.
///
Value *LoopStrengthReduce::getCastedVersionOf(Instruction::CastOps opcode,
                                              Value *V) {
  if (V->getType() == UIntPtrTy) return V;
  if (Constant *CB = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, CB, UIntPtrTy);

  Value *&New = CastedPointers[V];
  if (New) return New;

  New = SCEVExpander::InsertCastOfTo(opcode, V, UIntPtrTy);
  DeadInsts.insert(cast<Instruction>(New));
  return New;
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::
DeleteTriviallyDeadInstructions(SmallPtrSet<Instruction*,16> &Insts) {
  while (!Insts.empty()) {
    Instruction *I = *Insts.begin();
    Insts.erase(I);
    if (PHINode *PN = dyn_cast<PHINode>(I)) {
      // If all incoming values to the Phi are the same, we can replace the Phi
      // with that value.
      if (Value *PNV = PN->hasConstantValue()) {
        if (Instruction *U = dyn_cast<Instruction>(PNV))
          Insts.insert(U);
        SE->deleteValueFromRecords(PN);
        PN->replaceAllUsesWith(PNV);
        PN->eraseFromParent();
        Changed = true;
        continue;
      }
    }

    if (isInstructionTriviallyDead(I)) {
      for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
        if (Instruction *U = dyn_cast<Instruction>(*i))
          Insts.insert(U);
      SE->deleteValueFromRecords(I);
      I->eraseFromParent();
      Changed = true;
    }
  }
}
/// GetExpressionSCEV - Compute and return the SCEV for the specified
/// instruction.
SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
  // Pointer to pointer bitcast instructions return the same value as their
  // operands.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(Exp)) {
    if (SE->hasSCEV(BCI) || !isa<Instruction>(BCI->getOperand(0)))
      return SE->getSCEV(BCI);
    SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)));
    SE->setSCEV(BCI, R);
    return R;
  }
  // Scalar Evolution doesn't know how to compute SCEVs for GEP instructions.
  // If this is a GEP that SE doesn't know about, compute it now and insert it.
  // If this is not a GEP, or if we have already done this computation, just let
  // SE figure it out.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Exp);
  if (!GEP || SE->hasSCEV(GEP))
    return SE->getSCEV(Exp);
  // Analyze all of the subscripts of this getelementptr instruction, looking
  // for uses that are determined by the trip count of the loop.  First, skip
  // all operands that are not dependent on the IV.

  // Build up the base expression.  Insert an LLVM cast of the pointer to
  // uintptr_t first.
  SCEVHandle GEPVal = SE->getUnknown(
      getCastedVersionOf(Instruction::PtrToInt, GEP->getOperand(0)));
  gep_type_iterator GTI = gep_type_begin(GEP);

  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
       i != e; ++i, ++GTI) {
    // If this is a use of a recurrence that we can analyze, and it comes before
    // Op does in the GEP operand list, we will handle this when we process this
    // operand.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      unsigned Idx = cast<ConstantInt>(*i)->getZExtValue();
      uint64_t Offset = SL->getElementOffset(Idx);
      GEPVal = SE->getAddExpr(GEPVal,
                              SE->getIntegerSCEV(Offset, UIntPtrTy));
    } else {
      unsigned GEPOpiBits =
        (*i)->getType()->getPrimitiveSizeInBits();
      unsigned IntPtrBits = UIntPtrTy->getPrimitiveSizeInBits();
      Instruction::CastOps opcode = (GEPOpiBits < IntPtrBits ?
        Instruction::SExt : (GEPOpiBits > IntPtrBits ? Instruction::Trunc :
          Instruction::BitCast));
      Value *OpVal = getCastedVersionOf(opcode, *i);
      SCEVHandle Idx = SE->getSCEV(OpVal);

      uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
      if (TypeSize != 1)
        Idx = SE->getMulExpr(Idx,
                             SE->getConstant(ConstantInt::get(UIntPtrTy,
                                                              TypeSize)));
      GEPVal = SE->getAddExpr(GEPVal, Idx);
    }
  }

  SE->setSCEV(GEP, GEPVal);
  return GEPVal;
}
/// getSCEVStartAndStride - Compute the start and stride of this expression,
/// returning false if the expression is not a start/stride pair, or true if it
/// is.  The stride must be a loop invariant expression, but the start may be
/// a mix of loop invariant and loop variant expressions.
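///
/// For example (illustrative), for SH = {X,+,4}<L> + 7 this produces
/// Start = X + 7 and Stride = 4.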
static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
                                  SCEVHandle &Start, SCEVHandle &Stride,
                                  ScalarEvolution *SE) {
  SCEVHandle TheAddRec = Start;   // Initialize to zero.
  // If the outer level is an AddExpr, the operands are all start values except
  // for a nested AddRecExpr.
  if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (SCEVAddRecExpr *AddRec =
             dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
        if (AddRec->getLoop() == L)
          TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
        else
          return false;  // Nested IV of some sort?
      } else {
        Start = SE->getAddExpr(Start, AE->getOperand(i));
      }
  } else if (isa<SCEVAddRecExpr>(SH)) {
    TheAddRec = SH;
  } else {
    return false;  // not analyzable.
  }
  SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
  if (!AddRec || AddRec->getLoop() != L) return false;

  // FIXME: Generalize to non-affine IV's.
  if (!AddRec->isAffine()) return false;

  Start = SE->getAddExpr(Start, AddRec->getOperand(0));

  if (!isa<SCEVConstant>(AddRec->getOperand(1)))
    DOUT << "[" << L->getHeader()->getName()
         << "] Variable stride: " << *AddRec << "\n";

  Stride = AddRec->getOperand(1);
  return true;
}
/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
/// and now we need to decide whether the user should use the preinc or post-inc
/// value.  If this user should use the post-inc version of the IV, return true.
///
/// Choosing wrong here can break dominance properties (if we choose to use the
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
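///
/// For example (illustrative), a use in the loop's exit block that is
/// dominated by the latch block can use the post-incremented value, avoiding
/// an extra live range for the pre-incremented value outside the loop.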
static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
                                       Loop *L, DominatorTree *DT, Pass *P,
                                       SmallPtrSet<Instruction*,16> &DeadInsts){
  // If the user is in the loop, use the preinc value.
  if (L->contains(User->getParent())) return false;

  BasicBlock *LatchBlock = L->getLoopLatch();

  // Ok, the user is outside of the loop.  If it is dominated by the latch
  // block, use the post-inc value.
  if (DT->dominates(LatchBlock, User->getParent()))
    return true;
  // There is one case we have to be careful of: PHI nodes.  These little guys
  // can live in blocks that do not dominate the latch block, but (since their
  // uses occur in the predecessor block, not the block the PHI lives in) should
  // still use the post-inc value.  Check for this case now.
  PHINode *PN = dyn_cast<PHINode>(User);
  if (!PN) return false;  // not a phi, not dominated by latch block.
  // Look at all of the uses of IV by the PHI node.  If any use corresponds to
  // a block that is not dominated by the latch block, give up and use the
  // preincremented value.
  unsigned NumUses = 0;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      ++NumUses;
      if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
        return false;
    }
  // Okay, all uses of IV by PN are in predecessor blocks that really are
  // dominated by the latch block.  Split the critical edges and use the
  // post-incremented value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
      // Splitting the critical edge can reduce the number of entries in this
      // PHI.
      e = PN->getNumIncomingValues();
      if (--NumUses == 0) break;
    }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.insert(User);

  return true;
}
/// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true.  Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
                                      SmallPtrSet<Instruction*,16> &Processed) {
  if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
    return false;   // Void and FP expressions cannot be reduced.
  if (!Processed.insert(I))
    return true;    // Instruction already handled.

  // Get the symbolic expression for this instruction.
  SCEVHandle ISE = GetExpressionSCEV(I);
  if (isa<SCEVCouldNotCompute>(ISE)) return false;

  // Get the start and stride for this expression.
  SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
  SCEVHandle Stride = Start;
  if (!getSCEVStartAndStride(ISE, L, Start, Stride, SE))
    return false;  // Non-reducible symbolic expression, bail out.
  std::vector<Instruction *> IUsers;
  // Collect all I uses now because IVUseShouldUsePostIncValue may
  // invalidate use_iterator.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    IUsers.push_back(cast<Instruction>(*UI));

  for (unsigned iused_index = 0, iused_size = IUsers.size();
       iused_index != iused_size; ++iused_index) {

    Instruction *User = IUsers[iused_index];

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && Processed.count(User))
      continue;
    // If this is an instruction defined in a nested loop, or outside this loop,
    // don't recurse into it.
    bool AddUserToIVUsers = false;
    if (LI->getLoopFor(User->getParent()) != L) {
      DOUT << "FOUND USER in other loop: " << *User
           << "   OF SCEV: " << *ISE << "\n";
      AddUserToIVUsers = true;
    } else if (!AddUsersIfInteresting(User, L, Processed)) {
      DOUT << "FOUND USER: " << *User
           << "   OF SCEV: " << *ISE << "\n";
      AddUserToIVUsers = true;
    }
    if (AddUserToIVUsers) {
      IVUsersOfOneStride &StrideUses = IVUsesByStride[Stride];
      if (StrideUses.Users.empty())     // First occurrence of this stride?
        StrideOrder.push_back(Stride);

      // Okay, we found a user that we cannot reduce.  Analyze the instruction
      // and decide what to do with it.  If we are a use inside of the loop, use
      // the value before incrementation, otherwise use it after incrementation.
      if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
        // The value used will be incremented by the stride more than we are
        // expecting, so subtract this off.
        SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
        StrideUses.addUser(NewStart, User, I);
        StrideUses.Users.back().isUseOfPostIncrementedValue = true;
        DOUT << "   USING POSTINC SCEV, START=" << *NewStart << "\n";
      } else {
        StrideUses.addUser(Start, User, I);
      }
    }
  }
  return true;
}
namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct VISIBILITY_HIDDEN BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use.  As the use is processed, information gets moved from this
    /// field to the Imm field (below).  BasedUser values are sorted by this
    /// field.
    SCEVHandle Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction.
    SCEVHandle Imm;

    /// EmittedBase - The actual value* to use for the base value of this
    /// operation.  This is null if we should just use zero so far.
    Value *EmittedBase;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.Offset), Inst(IVSU.User),
        OperandValToReplace(IVSU.OperandValToReplace),
        Imm(SE->getIntegerSCEV(0, Base->getType())), EmittedBase(0),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue) {}
    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        SmallPtrSet<Instruction*,16> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L);
    void dump() const;
  };
}
void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  if (EmittedBase)
    cerr << "  EB=" << *EmittedBase;

  cerr << "   Inst: " << *Inst;
}
Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L) {
  // Figure out where we *really* want to insert this code.  In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user, we'd rather pull it out as
  // many loops as possible.
  LoopInfo &LI = Rewriter.getLoopInfo();
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
    BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
    InsertLoop = InsertLoop->getParentLoop();
  }
  // If there is no immediate value, skip the next part.
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
    if (SC->getValue()->isZero())
      return Rewriter.expandCodeFor(NewBase, BaseInsertPt);

  Value *Base = Rewriter.expandCodeFor(NewBase, BaseInsertPt);

  // If we are inserting the base and imm values in the same block, make sure to
  // adjust the IP position if insertion reused a result.
  if (IP == BaseInsertPt)
    IP = Rewriter.getInsertionPoint();

  // Always emit the immediate (if non-zero) into the same block as the user.
  SCEVHandle NewValSCEV = SE->getAddExpr(SE->getUnknown(Base), Imm);
  return Rewriter.expandCodeFor(NewValSCEV, IP);
}
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.  NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallPtrSet<Instruction*,16> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users.  Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation.  This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block.  However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    if (!isUseOfPostIncrementedValue) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
    // Adjust the type back to match the Inst. Note that we can't use InsertPt
    // here because the SCEVExpander may have inserted the instructions after
    // that point, in its efforts to avoid inserting redundant expressions.
    if (isa<PointerType>(OperandValToReplace->getType())) {
      NewVal = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
                                            NewVal,
                                            OperandValToReplace->getType());
    }
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
    DOUT << "    CHANGED: IMM =" << *Imm;
    DOUT << "  \tNEWBASE =" << *NewBase;
    DOUT << "  \tInst = " << *Inst;
    return;
  }
  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it.  Note that PHI nodes can
  // have multiple entries for the same predecessor.  We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If this is a critical edge, split the edge so that we do not insert the
      // code on all predecessor/successor paths.  We do this unless this is the
      // canonical backedge for this loop, as this can make some inserted code
      // be in an illegal position.
      BasicBlock *PHIPred = PN->getIncomingBlock(i);
      if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
          (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

        // First step, split the critical edge.
        SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

        // Next step: move the basic block.  In particular, if the PHI node
        // is outside of the loop, and PredTI is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after PredTI.
        if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
          BasicBlock *NewBB = PN->getIncomingBlock(i);
          NewBB->moveBefore(PN->getParent());
        }

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
      }
      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = PN->getIncomingBlock(i)->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);

        // Adjust the type back to match the PHI. Note that we can't use
        // InsertPt here because the SCEVExpander may have inserted its
        // instructions after that point, in its efforts to avoid inserting
        // redundant expressions.
        if (isa<PointerType>(PN->getType())) {
          Code = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
                                              Code,
                                              PN->getType());
        }
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.insert(Inst);

  DOUT << "    CHANGED: IMM =" << *Imm << "  Inst = " << *Inst;
}
/// isTargetConstant - Return true if the following can be referenced by the
/// immediate field of a target instruction.
static bool isTargetConstant(const SCEVHandle &V, const Type *UseTy,
                             const TargetLowering *TLI) {
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      return TLI->isLegalAddressingMode(AM, UseTy);
    }
    // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
    return (VC > -(1 << 16) && VC < (1 << 16)-1);
  }

  if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
      if (TLI && CE->getOpcode() == Instruction::PtrToInt) {
        Constant *Op0 = CE->getOperand(0);
        if (GlobalValue *GV = dyn_cast<GlobalValue>(Op0)) {
          TargetLowering::AddrMode AM;
          AM.BaseGV = GV;
          return TLI->isLegalAddressingMode(AM, UseTy);
        }
      }

  return false;
}
/// MoveLoopVariantsToImediateField - Move any subexpressions from Val that are
/// loop-variant to the Imm operand.
static void MoveLoopVariantsToImediateField(SCEVHandle &Val, SCEVHandle &Imm,
                                            Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveLoopVariantsToImediateField(Start, Imm, L, SE);

    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *UseTy = User->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    UseTy = SI->getOperand(0)->getType();
  if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      SCEVHandle NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, User, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveImmediateValues(TLI, User, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress && isTargetConstant(SME->getOperand(0), UseTy, TLI) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
      SCEVHandle NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, User, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8".  If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (isTargetConstant(SubImm, UseTy, TLI)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }
  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && isTargetConstant(Val, UseTy, TLI)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}
/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together.  This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
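///
/// For example (illustrative), (A + B + {C,+,D}<L>) separates into the
/// subexpressions A, B, {0,+,D}<L>, and C.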
static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
                             SCEVHandle Expr,
                             ScalarEvolution *SE) {
  if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!isa<SCEVConstant>(Expr) ||
             !cast<SCEVConstant>(Expr)->getValue()->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}
/// RemoveCommonExpressionsFromUseBases - Look through all of the uses in Bases,
/// removing any common subexpressions from it.  Anything truly common is
/// removed, accumulated, and returned.  This looks for things like (a+b+c) and
/// (a+c+d) -> (a+c).  The common expression is *removed* from the Bases.
static SCEVHandle
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE) {
  unsigned NumUses = Uses.size();

  // Only one use?  Use its base, regardless of what it is!
  SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  SCEVHandle Result = Zero;
  if (NumUses == 1) {
    std::swap(Result, Uses[0].Base);
    return Result;
  }
  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  std::map<SCEVHandle, unsigned> SubExpressionUseCounts;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  std::vector<SCEVHandle> UniqueSubExprs;

  std::vector<SCEVHandle> SubExprs;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseCounts for each subexpr present.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (++SubExpressionUseCounts[SubExprs[j]] == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
    SubExprs.clear();
  }
  // Now that we know how many times each is used, build Result.  Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<SCEVHandle, unsigned>::iterator I =
      SubExpressionUseCounts.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseCounts.end() && "Entry not found?");
    if (I->second == NumUses) {   // Found CSE!
      Result = SE->getAddExpr(Result, I->first);
    } else {
      // Remove non-cse's from SubExpressionUseCounts.
      SubExpressionUseCounts.erase(I);
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;
  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseCounts.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
/// isZero - Returns true if the scalar evolution expression is zero.
///
static bool isZero(const SCEVHandle &V) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V))
    return SC->getValue()->isZero();
  return false;
}
/// ValidStride - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
                                     int64_t Scale,
                                     const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].Inst))
      AccessTy = SI->getOperand(0)->getType();
    else if (LoadInst *LI = dyn_cast<LoadInst>(UsersToProcess[i].Inst))
      AccessTy = LI->getType();
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !isZero(UsersToProcess[i].Base);
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// free.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return (!Ty1->canLosslesslyBitCastTo(Ty2) &&
          !(isa<PointerType>(Ty2) &&
            Ty1->canLosslesslyBitCastTo(UIntPtrTy)) &&
          !(isa<PointerType>(Ty1) &&
            Ty2->canLosslesslyBitCastTo(UIntPtrTy)));
}
/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible.
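///
/// For example (illustrative), if an IV of stride 4 already exists, users of
/// stride 8 may be rewritten as that IV multiplied by 2, provided the target
/// addressing mode supports a scale of 2.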
unsigned LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                const SCEVHandle &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
        IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidStride(HasBaseReg, Scale, UsersToProcess)))
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // FIXME: Only handle base == 0 for now.
          // Only reuse previous IV if it would not require a type conversion.
          if (isZero(II->Base) &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return Scale;
          }
    }
  }
  return 0;
}
/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
  SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
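///
/// For example, the pointer operand of a load or store is an address use,
/// but the stored value operand of a store is not.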
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
// CollectIVUsers - Transform our list of users and offsets to a bit more
// complex table. In this new vector, each 'BasedUser' contains 'Base', the
// base of the strided access, as well as the old information from Uses. We
// progressively move information from the Base field to the Imm field, until
// we eventually have the full access expression to rewrite the use.
SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
                                              IVUsersOfOneStride &Uses,
                                              Loop *L,
                                              bool &AllUsesAreAddresses,
                                              std::vector<BasedUser> &UsersToProcess) {
  UsersToProcess.reserve(Uses.Users.size());
  for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i) {
    UsersToProcess.push_back(BasedUser(Uses.Users[i], SE));

    // Move any loop invariant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImediateField(UsersToProcess.back().Base,
                                    UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }
  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases.  We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible.  Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  SCEVHandle CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE);
  // Next, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.  We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV.  Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Addressing modes can be folded into loads and stores.  Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }
  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  return CommonExprs;
}
/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV.  All of the users may have different starting values, and this
/// may not be the only stride (we know it is if isOnlyStride is true).
void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                                      IVUsersOfOneStride &Uses,
                                                      Loop *L,
                                                      bool isOnlyStride) {
  // If all the users are moved to another stride, then there is nothing to do.
  if (Uses.Users.empty())
    return;
  // Keep track if every use in UsersToProcess is an address. If they all are,
  // we may be able to rewrite the entire collection of them in terms of a
  // smaller-stride IV.
  bool AllUsesAreAddresses = true;

  // Transform our list of users and offsets to a bit more complex table.  In
  // this new vector, each 'BasedUser' contains 'Base', the base of the
  // strided access, as well as the old information from Uses.  We progressively
  // move information from the Base field to the Imm field, until we eventually
  // have the full access expression to rewrite the use.
  std::vector<BasedUser> UsersToProcess;
  SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                          UsersToProcess);

  // If we managed to find some expressions in common, we'll need to carry
  // their value in a register and add it in for each use.  This will take up
  // a register operand, which potentially restricts what stride values are
  // valid.
  bool HaveCommonExprs = !isZero(CommonExprs);
  // If all uses are addresses, check if it is possible to reuse an IV with a
  // stride that is a factor of this stride. And that the multiple is a number
  // that can be encoded in the scale field of the target addressing mode. And
  // that we will have a valid instruction after this substitution, including
  // the immediate field, if any.
  PHINode *NewPHI = NULL;
  Value *IncV = NULL;
  IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
                 SE->getIntegerSCEV(0, Type::Int32Ty),
                 0, 0);
  unsigned RewriteFactor = 0;
  RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
                                  Stride, ReuseIV, CommonExprs->getType(),
                                  UsersToProcess);
  if (RewriteFactor != 0) {
    DOUT << "BASED ON IV of STRIDE " << *ReuseIV.Stride
         << " and BASE " << *ReuseIV.Base << " :\n";
    NewPHI = ReuseIV.PHI;
    IncV = ReuseIV.IncV;
  }
  const Type *ReplacedTy = CommonExprs->getType();

  // Now that we know what we need to do, insert the PHI node itself.
  //
  DOUT << "INSERTING IV of TYPE " << *ReplacedTy << " of STRIDE "
       << *Stride << " and BASE " << *CommonExprs << ": ";

  SCEVExpander Rewriter(*SE, *LI);
  SCEVExpander PreheaderRewriter(*SE, *LI);

  BasicBlock *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  Instruction *PhiInsertBefore = L->getHeader()->begin();

  BasicBlock *LatchBlock = L->getLoopLatch();

  // Emit the initial base value into the loop preheader.
  Value *CommonBaseV
    = PreheaderRewriter.expandCodeFor(CommonExprs, PreInsertPt);
  if (RewriteFactor == 0) {
    // Create a new Phi for this base, and stick it in the loop header.
    NewPHI = PHINode::Create(ReplacedTy, "iv.", PhiInsertBefore);
    ++NumInserted;

    // Add common base to the new Phi node.
    NewPHI->addIncoming(CommonBaseV, Preheader);

    // If the stride is negative, insert a sub instead of an add for the
    // increment.
    bool isNegative = isNonConstantNegative(Stride);
    SCEVHandle IncAmount = Stride;
    if (isNegative)
      IncAmount = SE->getNegativeSCEV(Stride);

    // Insert the stride into the preheader.
    Value *StrideV = PreheaderRewriter.expandCodeFor(IncAmount, PreInsertPt);
    if (!isa<ConstantInt>(StrideV)) ++NumVariable;

    // Emit the increment of the base value before the terminator of the loop
    // latch block, and add it to the Phi node.
    SCEVHandle IncExp = SE->getUnknown(StrideV);
    if (isNegative)
      IncExp = SE->getNegativeSCEV(IncExp);
    IncExp = SE->getAddExpr(SE->getUnknown(NewPHI), IncExp);

    IncV = Rewriter.expandCodeFor(IncExp, LatchBlock->getTerminator());
    IncV->setName(NewPHI->getName()+".inc");
    NewPHI->addIncoming(IncV, LatchBlock);

    // Remember this in case a later stride is a multiple of this.
    IVsByStride[Stride].addIV(Stride, CommonExprs, NewPHI, IncV);
    DOUT << " IV=%" << NewPHI->getNameStr() << " INC=%" << IncV->getNameStr();
  } else {
    Constant *C = dyn_cast<Constant>(CommonBaseV);
    if (!C ||
        (!C->isNullValue() &&
         !isTargetConstant(SE->getUnknown(CommonBaseV), ReplacedTy, TLI)))
      // We want the common base emitted into the preheader!  This is just
      // using cast as a copy so BitCast (no-op cast) is appropriate
      CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                    "commonbase", PreInsertPt);
  }
  DOUT << "\n";
  // We want to emit code for users inside the loop first.  To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together.  By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't.  The problem is that
  // SCEVHandle's don't have a deterministic ordering w.r.t to each other, so
  // we don't have anything to do a '<' comparison on.  Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // equality comparisons of the bases.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    SCEVHandle Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
  // Process all the users now.  This outer loop handles all bases, the inner
  // loop handles all users of a particular base.
  while (!UsersToProcess.empty()) {
    SCEVHandle Base = UsersToProcess.back().Base;

    // Emit the code for Base into the preheader.
    Value *BaseV = PreheaderRewriter.expandCodeFor(Base, PreInsertPt);

    DOUT << "  INSERTING code for BASE = " << *Base << ":";
    if (BaseV->hasName())
      DOUT << " Result value name = %" << BaseV->getNameStr();
    DOUT << "\n";
    // If BaseV is a constant other than 0, make sure that it gets inserted into
    // the preheader, instead of being forward substituted into the uses.  We do
    // this by forcing a BitCast (noop cast) to be inserted into the preheader
    // in this case.
    if (Constant *C = dyn_cast<Constant>(BaseV)) {
      if (!C->isNullValue() && !isTargetConstant(Base, ReplacedTy, TLI)) {
        // We want this constant emitted into the preheader! This is just
        // using cast as a copy so BitCast (no-op cast) is appropriate
        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
                                PreInsertPt);
      }
    }

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instructions that we identified as using this stride and base.
    do {
      // FIXME: Use emitted users to emit other users.
      BasedUser &User = UsersToProcess.back();
      // If this instruction wants to use the post-incremented value, move it
      // after the post-inc and use its value instead of the PHI.
      Value *RewriteOp = NewPHI;
      if (User.isUseOfPostIncrementedValue) {
        RewriteOp = IncV;

        // If this user is in the loop, make sure it is the last thing in the
        // loop to ensure it is dominated by the increment.
        if (L->contains(User.Inst->getParent()))
          User.Inst->moveBefore(LatchBlock->getTerminator());
      }

      if (RewriteOp->getType() != ReplacedTy) {
        Instruction::CastOps opcode = Instruction::Trunc;
        if (ReplacedTy->getPrimitiveSizeInBits() ==
            RewriteOp->getType()->getPrimitiveSizeInBits())
          opcode = Instruction::BitCast;
        RewriteOp = SCEVExpander::InsertCastOfTo(opcode, RewriteOp, ReplacedTy);
      }
      SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);

      // If we had to insert new instructions for RewriteOp, we have to
      // consider that they may not have been able to end up immediately
      // next to RewriteOp, because non-PHI instructions may never precede
      // PHI instructions in a block. In this case, remember where the last
      // instruction was inserted so that if we're replacing a different
      // PHI node, we can use the later point to expand the final
      // RewriteExpr.
      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
      if (RewriteOp == NewPHI) NewBasePt = 0;

      // Clear the SCEVExpander's expression map so that we are guaranteed
      // to have the code emitted where we expect it.
      Rewriter.clear();
      // If we are reusing the iv, then it must be multiplied by a constant
      // factor to take advantage of the addressing mode scale component.
      if (RewriteFactor != 0) {
        RewriteExpr = SE->getMulExpr(SE->getIntegerSCEV(RewriteFactor,
                                                        RewriteExpr->getType()),
                                     RewriteExpr);

        // The common base is emitted in the loop preheader. But since we
        // are reusing an IV, it has not been used to initialize the PHI node.
        // Add it to the expression used to rewrite the uses.
        if (!isa<ConstantInt>(CommonBaseV) ||
            !cast<ConstantInt>(CommonBaseV)->isZero())
          RewriteExpr = SE->getAddExpr(RewriteExpr,
                                       SE->getUnknown(CommonBaseV));
      }
      // Now that we know what we need to do, insert code before User for the
      // immediate and any loop-variant expressions.
      if (!isa<ConstantInt>(BaseV) || !cast<ConstantInt>(BaseV)->isZero())
        // Add BaseV to the PHI value if needed.
        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));

      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
                                          Rewriter, L, this, DeadInsts);

      // Mark old value we replaced as possibly dead, so that it is eliminated
      // if we just replaced the last use of that value.
      DeadInsts.insert(cast<Instruction>(User.OperandValToReplace));

      UsersToProcess.pop_back();
      ++NumReduced;

      // If there are any more users to process with the same base, process them
      // now.  We sorted by base above, so we just have to check the last elt.
    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.
}
/// FindIVForUser - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LoopStrengthReduce::FindIVForUser(ICmpInst *Cond, IVStrideUse *&CondUse,
                                       const SCEVHandle *&CondStride) {
  for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e && !CondUse;
       ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
      IVUsesByStride.find(StrideOrder[Stride]);
    assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");

    for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
         E = SI->second.Users.end(); UI != E; ++UI)
      if (UI->User == Cond) {
        // NOTE: we could handle setcc instructions with multiple uses here, but
        // InstCombine does it as well for simple uses, it's not clear that it
        // occurs enough in real life to handle.
        CondUse = &*UI;
        CondStride = &SI->first;
        return true;
      }
  }
  return false;
}
namespace {
  // Constant strides come first, which in turn are sorted by their absolute
  // values. If absolute values are the same, then positive strides come first.
  // e.g.
  // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
  struct StrideCompare {
    bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
      SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
      SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
      if (LHSC && RHSC) {
        int64_t  LV = LHSC->getValue()->getSExtValue();
        int64_t  RV = RHSC->getValue()->getSExtValue();
        uint64_t ALV = (LV < 0) ? -LV : LV;
        uint64_t ARV = (RV < 0) ? -RV : RV;
        if (ALV == ARV)
          return LV > RV;
        return ALV < ARV;
      }
      return (LHSC && !RHSC);
    }
  };
}
/// ChangeCompareStride - If a loop termination compare instruction is the
/// only use of its stride, and the comparison is against a constant value,
/// try to eliminate the stride by moving the compare instruction to another
/// stride and change its constant operand accordingly. e.g.
///
/// loop:
///   v1 = v1 + 3
///   v2 = v2 + 1
///   if (v2 < 10) goto loop
/// =>
/// loop:
///   v1 = v1 + 3
///   if (v1 < 30) goto loop
ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                                  IVStrideUse* &CondUse,
                                                  const SCEVHandle* &CondStride) {
  // Forgo this transformation if the condition has multiple uses. This is
  // over-conservative, but simpler than alternatives. It guards against
  // comparisons with a use that occurs earlier than the add instruction for the
  // new stride index.  See
  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness.ll
  // for an example of this situation.
  if (!Cond->hasOneUse())
    return Cond;

  if (StrideOrder.size() < 2 ||
      IVUsesByStride[*CondStride].Users.size() != 1)
    return Cond;
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
  if (!SC) return Cond;
  ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!C) return Cond;
1549 ICmpInst::Predicate Predicate = Cond->getPredicate();
1550 int64_t CmpSSInt = SC->getValue()->getSExtValue();
1551 int64_t CmpVal = C->getValue().getSExtValue();
1552 unsigned BitWidth = C->getValue().getBitWidth();
1553 uint64_t SignBit = 1ULL << (BitWidth-1);
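  // e.g. for a 32-bit compare constant, SignBit is 0x80000000; it is used
  // below to detect a change of sign when the constant is rescaled.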
  const Type *CmpTy = C->getType();
  const Type *NewCmpTy = NULL;
  unsigned TyBits = CmpTy->getPrimitiveSizeInBits();
  unsigned NewTyBits = 0;
  int64_t NewCmpVal = CmpVal;
  SCEVHandle *NewStride = NULL;
  Value *NewIncV = NULL;
  int64_t Scale = 1;
  // Look for a suitable stride / iv as replacement.
  std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
  for (unsigned i = 0, e = StrideOrder.size(); i != e; ++i) {
    std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
      IVUsesByStride.find(StrideOrder[i]);
    if (!isa<SCEVConstant>(SI->first))
      continue;
    int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
    // The candidate stride must be an exact multiple of the compare's stride
    // and strictly larger in absolute value.
    if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
      continue;

    Scale = SSInt / CmpSSInt;
    NewCmpVal = CmpVal * Scale;
    APInt Mul = APInt(BitWidth, NewCmpVal);
    // Check for overflow.
    if (Mul.getSExtValue() != NewCmpVal) {
      NewCmpVal = CmpVal;
      continue;
    }

    // Watch out for overflow.
    if (ICmpInst::isSignedPredicate(Predicate) &&
        (CmpVal & SignBit) != (NewCmpVal & SignBit)) {
      NewCmpVal = CmpVal;
      continue;
    }
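    // For example (illustrative values): if the compare's stride is 1,
    // CmpVal is 10, and this candidate stride is 3, then Scale = 3 and
    // NewCmpVal = 30.  With an i8 compare, CmpVal = 100 would rescale to
    // 300, which does not fit in 8 bits, so the overflow check rejects it.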
    if (NewCmpVal != CmpVal) {
      // Pick the best iv to use trying to avoid a cast.
      NewIncV = NULL;
      for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
             E = SI->second.Users.end(); UI != E; ++UI) {
        NewIncV = UI->OperandValToReplace;
        if (NewIncV->getType() == CmpTy)
          break;
      }
      if (!NewIncV) {
        NewCmpVal = CmpVal;
        continue;
      }

      NewCmpTy = NewIncV->getType();
      NewTyBits = isa<PointerType>(NewCmpTy)
        ? UIntPtrTy->getPrimitiveSizeInBits()
        : NewCmpTy->getPrimitiveSizeInBits();
      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
        // Check if it is possible to rewrite it using
        // an iv / stride of a smaller integer type.
        bool TruncOk = false;
        if (NewCmpTy->isInteger()) {
          unsigned Bits = NewTyBits;
          if (ICmpInst::isSignedPredicate(Predicate))
            --Bits;
          uint64_t Mask = (1ULL << Bits) - 1;
          if (((uint64_t)NewCmpVal & Mask) == (uint64_t)NewCmpVal)
            TruncOk = true;
        }
        if (!TruncOk) {
          NewCmpVal = CmpVal;
          continue;
        }
      }
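      // e.g. (illustrative) rewriting an i32 compare against an i16 iv with
      // a signed predicate leaves 15 usable bits, so NewCmpVal = 30 (which
      // fits in 15 bits) is acceptable, while NewCmpVal = 40000 is not.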
      // Don't rewrite if the use offset is non-constant and the new type is
      // different from the old one.
      // FIXME: too conservative?
      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset)) {
        NewCmpVal = CmpVal;
        continue;
      }

      bool AllUsesAreAddresses = true;
      std::vector<BasedUser> UsersToProcess;
      SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
                                              AllUsesAreAddresses,
                                              UsersToProcess);
      // Avoid rewriting the compare instruction with an iv of new stride
      // if it's likely the new stride uses will be rewritten using the
      // stride of the compare instruction.
      if (AllUsesAreAddresses &&
          ValidStride(!isZero(CommonExprs), Scale, UsersToProcess)) {
        NewCmpVal = CmpVal;
        continue;
      }
      // If scale is negative, use inverse predicate unless it's testing
      // for equality.
      if (Scale < 0 && !Cond->isEquality())
        Predicate = ICmpInst::getInversePredicate(Predicate);
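      // Multiplying both sides of the compare by a negative scale reverses
      // the ordering, so e.g. a less-than test must become a greater-than
      // style test; equality tests are unaffected by the sign of the scale.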
      NewStride = &StrideOrder[i];
      break;
    }
  }
  if (NewCmpVal != CmpVal) {
    // Create a new compare instruction using new stride / iv.
    ICmpInst *OldCond = Cond;
    Value *RHS;
    if (!isa<PointerType>(NewCmpTy))
      RHS = ConstantInt::get(NewCmpTy, NewCmpVal);
    else {
      RHS = ConstantInt::get(UIntPtrTy, NewCmpVal);
      RHS = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr, RHS, NewCmpTy);
    }
    // Insert new compare instruction.
    Cond = new ICmpInst(Predicate, NewIncV, RHS,
                        L->getHeader()->getName() + ".termcond",
                        OldCond);

    // Remove the old compare instruction. The old indvar is probably dead too.
    DeadInsts.insert(cast<Instruction>(CondUse->OperandValToReplace));
    SE->deleteValueFromRecords(OldCond);
    OldCond->replaceAllUsesWith(Cond);
    OldCond->eraseFromParent();

    IVUsesByStride[*CondStride].Users.pop_back();
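    // Scale the offset of the compare's IV use to match the new stride.  If
    // the compare's type is unchanged, multiply the offset expression by
    // Scale; otherwise the offset is known to be constant (checked above),
    // so fold the scaled value into a constant of the new type.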
    SCEVHandle NewOffset = TyBits == NewTyBits
      ? SE->getMulExpr(CondUse->Offset,
                       SE->getConstant(ConstantInt::get(CmpTy, Scale)))
      : SE->getConstant(ConstantInt::get(NewCmpTy,
          cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
    IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewIncV);
    CondUse = &IVUsesByStride[*NewStride].Users.back();
    CondStride = NewStride;
    ++NumEliminated;
  }

  return Cond;
}
// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
// uses in the loop, look to see if we can eliminate some, in favor of using
// common indvars for the different uses.
void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
  // TODO: implement optimizations here.

  // Finally, get the terminating condition for the loop if possible.  If we
  // can, we want to change it to use a post-incremented version of its
  // induction variable, to allow coalescing the live ranges for the IV into
  // one register value.
  PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock =
    SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
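  // (The header PHI has exactly two incoming blocks: the preheader and the
  // latch.  The boolean comparison above is used as an index, selecting
  // incoming block 1 if block 0 is the preheader, and block 0 otherwise.)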
  BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
  if (!TermBr || TermBr->isUnconditional() ||
      !isa<ICmpInst>(TermBr->getCondition()))
    return;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());

  // Search IVUsesByStride to find Cond's IVUse if there is one.
  IVStrideUse *CondUse = 0;
  const SCEVHandle *CondStride = 0;

  if (!FindIVForUser(Cond, CondUse, CondStride))
    return; // setcc doesn't use the IV.

  // If possible, change the stride and operands of the compare instruction to
  // eliminate one stride.
  Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);

  // It's possible for the setcc instruction to be anywhere in the loop, and
  // possible for it to have multiple users.  If it is not immediately before
  // the latch block branch, move it.
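  // (++BasicBlock::iterator(Cond) is the instruction immediately after Cond,
  // so the test below checks whether Cond directly precedes the branch.)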
  if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
    if (Cond->hasOneUse()) {   // Condition has a single use, just move it.
      Cond->moveBefore(TermBr);
    } else {
      // Otherwise, clone the terminating condition and insert it into the
      // loop end.
      Cond = cast<ICmpInst>(Cond->clone());
      Cond->setName(L->getHeader()->getName() + ".termcond");
      LatchBlock->getInstList().insert(TermBr, Cond);

      // Clone the IVUse, as the old use still exists!
      IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
                                          CondUse->OperandValToReplace);
      CondUse = &IVUsesByStride[*CondStride].Users.back();
    }
  }
  // If we get to here, we know that we can transform the setcc instruction to
  // use the post-incremented version of the IV, allowing us to coalesce the
  // live ranges for the IV correctly.
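  // (Illustrative: if the exit test was "icmp slt %iv, %n" with CondUse
  // offset 0 and stride 1, rewriting it against the post-incremented value
  // %iv.next = %iv + 1 requires offset -1 so the same iterations are taken;
  // subtracting the stride from the offset achieves exactly that.)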
  CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
  CondUse->isUseOfPostIncrementedValue = true;
  Changed = true;
}
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {

  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  SE = &getAnalysis<ScalarEvolution>();
  TD = &getAnalysis<TargetData>();
  UIntPtrTy = TD->getIntPtrType();

  // Find all uses of induction variables in this loop, and categorize
  // them by stride.  Start by finding all of the PHI nodes in the header for
  // this loop.  If they are induction variables, inspect their uses.
  SmallPtrSet<Instruction*,16> Processed;   // Don't reprocess instructions.
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
    AddUsersIfInteresting(I, L, Processed);

  // If we have nothing to do, return.
  if (IVUsesByStride.empty()) return false;

  // Optimize induction variables.  Some indvar uses can be transformed to use
  // strides that will be needed for other purposes.  A common example of this
  // is the exit test for the loop, which can often be rewritten to use the
  // computation of some other indvar to decide when to terminate the loop.
  OptimizeIndvars(L);
  // FIXME: We can widen subreg IV's here for RISC targets.  e.g. instead of
  // doing computation in byte values, promote to 32-bit values if safe.

  // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
  // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
  // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
  // Need to be careful that IV's are all the same type.  Only works for
  // intptr_t indvars.

  // If we only have one stride, we can more aggressively eliminate some
  // things.
  bool HasOneStride = IVUsesByStride.size() == 1;

  DOUT << "\nLSR on ";
  DEBUG(L->dump());

  // IVsByStride keeps IVs for one particular loop.
  assert(IVsByStride.empty() && "Stale entries in IVsByStride?");

  // Sort the StrideOrder so we process smaller strides first.
  std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());

  // Note: this processes each stride/type pair individually.  All users
  // passed into StrengthReduceStridedIVUsers have the same type AND stride.
  // Also note that we iterate over IVUsesByStride indirectly by using
  // StrideOrder.  This extra layer of indirection makes the ordering of
  // strides deterministic - not dependent on map order.
  for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
      IVUsesByStride.find(StrideOrder[Stride]);
    assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
    StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
  }

  // We're done analyzing this loop; release all the state we built up for it.
  CastedPointers.clear();
  IVUsesByStride.clear();
  IVsByStride.clear();
  StrideOrder.clear();
  // Clean up after ourselves.
  if (!DeadInsts.empty()) {
    DeleteTriviallyDeadInstructions(DeadInsts);

    BasicBlock::iterator I = L->getHeader()->begin();
    PHINode *PN;
    while ((PN = dyn_cast<PHINode>(I))) {
      ++I;  // Preincrement iterator to avoid invalidating it when deleting PN.

      // At this point, we know that we have killed one or more GEP
      // instructions.  It is worth checking to see if the canonical indvar is
      // also dead, so that we can remove it as well.  The requirements for
      // the canonical indvar to be considered dead are:
      // 1. the canonical indvar has one use
      // 2. the use is an add instruction
      // 3. the add has one use
      // 4. the add is used by the canonical indvar
      // If all four cases above are true, then we can remove both the add and
      // the canonical indvar.
      // FIXME: this needs to eliminate an induction variable even if it's
      // being compared against some value to decide loop termination.
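      // An illustrative IR shape that satisfies all four conditions:
      //   %iv      = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
      //   %iv.next = add i32 %iv, 1          ; sole use of %iv
      // where %iv is the only user of %iv.next, so both can be deleted.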
      if (PN->hasOneUse()) {
        Instruction *BO = dyn_cast<Instruction>(*PN->use_begin());
        if (BO && (isa<BinaryOperator>(BO) || isa<CmpInst>(BO))) {
          if (BO->hasOneUse() && PN == *(BO->use_begin())) {
            DeadInsts.insert(BO);
            // Break the cycle, then delete the PHI.
            SE->deleteValueFromRecords(PN);
            PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
            PN->eraseFromParent();
          }
        }
      }
    }
    DeleteTriviallyDeadInstructions(DeadInsts);
  }

  return Changed;
}