//===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// This is accomplished by creating a new Value to hold the initial value of
// the array access for the first iteration, and then creating a new GEP
// instruction in the loop to increment the value by the appropriate amount.
//
//===----------------------------------------------------------------------===//
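//
// For illustration only (not from an actual test case): an access such as
// "A[i]" with i = {0,+,1} is rewritten so that a pointer-sized IV carries the
// address of A[0] and is bumped by sizeof(A[0]) in the loop latch, removing
// the per-iteration scaling of i by the element size.
//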
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumReduced,    "Number of GEPs strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");

namespace {

  struct BasedUser;

  /// IVStrideUse - Keep track of one use of a strided induction variable, where
  /// the stride is stored externally.  The Offset member keeps track of the
  /// offset from the IV, User is the actual user of the operand, and
  /// 'OperandValToReplace' is the operand of the User that is the use.
  struct VISIBILITY_HIDDEN IVStrideUse {
    SCEVHandle   Offset;
    Instruction *User;
    Value       *OperandValToReplace;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop or uses dominated by the loop.
    bool isUseOfPostIncrementedValue;

    IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
      : Offset(Offs), User(U), OperandValToReplace(O),
        isUseOfPostIncrementedValue(false) {}
  };

  /// IVUsersOfOneStride - This structure keeps track of all instructions that
  /// have an operand that is based on the trip count multiplied by some stride.
  /// The stride for all of these users is common and kept external to this
  /// structure.
  struct VISIBILITY_HIDDEN IVUsersOfOneStride {
    /// Users - Keep track of all of the users of this stride as well as the
    /// initial value and the operand that uses the IV.
    std::vector<IVStrideUse> Users;

    void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
      Users.push_back(IVStrideUse(Offset, User, Operand));
    }
  };
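  // Illustrative example (names invented): given accesses A[i] and B[i]
  // (stride 4) plus C[2*i] (stride 8), the IVUsesByStride map declared below
  // ends up with two entries, one per stride, each holding the IVStrideUse
  // records for the users of that stride.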
  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers.  It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct VISIBILITY_HIDDEN IVExpr {
    SCEVHandle  Stride;
    SCEVHandle  Base;
    PHINode    *PHI;
    Value      *IncV;

    IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi,
           Value *incv)
      : Stride(stride), Base(base), PHI(phi), IncV(incv) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
  /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
  struct VISIBILITY_HIDDEN IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI,
               Value *IncV) {
      IVs.push_back(IVExpr(Stride, Base, PHI, IncV));
    }
  };
  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    const TargetData *TD;
    const Type *UIntPtrTy;

    /// IVUsesByStride - Keep track of all uses of induction variables that we
    /// are interested in.  The key of the map is the stride of the access.
    std::map<SCEVHandle, IVUsersOfOneStride> IVUsesByStride;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<SCEVHandle, IVsOfOneStride> IVsByStride;

    /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
    /// We use this to iterate over the IVUsesByStride collection without being
    /// dependent on random ordering of pointers in the process.
    SmallVector<SCEVHandle, 16> StrideOrder;

    /// CastedPointers - As we need to cast values to uintptr_t, this keeps
    /// track of the casted version of each value.  This is accessed by
    /// getCastedVersionOf.
    DenseMap<Value*, Value*> CastedPointers;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<Instruction*, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG.  However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetData>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
    }

  private:
    /// getCastedVersionOf - Return the specified value casted to uintptr_t.
    ///
    Value *getCastedVersionOf(Instruction::CastOps opcode, Value *V);

    bool AddUsersIfInteresting(Instruction *I, Loop *L,
                               SmallPtrSet<Instruction*,16> &Processed);
    SCEVHandle GetExpressionSCEV(Instruction *E);
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEVHandle* &CondStride);
    void OptimizeIndvars(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);

    /// OptimizeSMax - Rewrite the loop's terminating condition
    /// if it uses an smax computation.
    ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
                           IVStrideUse* &CondUse);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEVHandle *&CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    int64_t CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
                            IVExpr&, const Type*,
                            const std::vector<BasedUser>& UsersToProcess);
    bool ValidStride(bool, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
                              IVUsersOfOneStride &Uses,
                              Loop *L,
                              bool &AllUsesAreAddresses,
                              bool &AllUsesAreOutsideLoop,
                              std::vector<BasedUser> &UsersToProcess);
    void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L, bool isOnlyStride);
    void DeleteTriviallyDeadInstructions();
  };
}
char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

/// getCastedVersionOf - Return the specified value casted to uintptr_t. This
/// assumes that the Value* V is of integer or pointer type only.
///
Value *LoopStrengthReduce::getCastedVersionOf(Instruction::CastOps opcode,
                                              Value *V) {
  if (V->getType() == UIntPtrTy) return V;
  if (Constant *CB = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, CB, UIntPtrTy);

  Value *&New = CastedPointers[V];
  if (New) return New;

  New = SCEVExpander::InsertCastOfTo(opcode, V, UIntPtrTy);
  DeadInsts.push_back(cast<Instruction>(New));
  return New;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  // Sort the deadinsts list so that we can trivially eliminate duplicates as we
  // go.  The code below never adds a non-dead instruction to the worklist, but
  // callers may not be so careful.
  array_pod_sort(DeadInsts.begin(), DeadInsts.end());

  // Drop duplicate instructions and those with uses.
  for (unsigned i = 0, e = DeadInsts.size()-1; i < e; ++i) {
    Instruction *I = DeadInsts[i];
    if (!I->use_empty()) DeadInsts[i] = 0;
    while (i != e && DeadInsts[i+1] == I)
      DeadInsts[++i] = 0;
  }

  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.back();
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    SE->deleteValueFromRecords(I);

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
  }
}
/// GetExpressionSCEV - Compute and return the SCEV for the specified
/// instruction.
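/// For example (illustrative), "getelementptr [100 x i32]* %A, i32 0, i32 %i"
/// is modeled as "ptrtoint(%A) + 4*%i" once the pointer has been cast to
/// uintptr_t, so a GEP indexed by the IV becomes an ordinary add/mul SCEV that
/// the rest of this pass can reason about.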
SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
  // Pointer to pointer bitcast instructions return the same value as their
  // operand.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(Exp)) {
    if (SE->hasSCEV(BCI) || !isa<Instruction>(BCI->getOperand(0)))
      return SE->getSCEV(BCI);
    SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)));
    SE->setSCEV(BCI, R);
    return R;
  }

  // Scalar Evolutions doesn't know how to compute SCEV's for GEP instructions.
  // If this is a GEP that SE doesn't know about, compute it now and insert it.
  // If this is not a GEP, or if we have already done this computation, just let
  // SE figure it out.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Exp);
  if (!GEP || SE->hasSCEV(GEP))
    return SE->getSCEV(Exp);

  // Analyze all of the subscripts of this getelementptr instruction, looking
  // for uses that are determined by the trip count of the loop.  First, skip
  // all operands that are not dependent on the IV.

  // Build up the base expression.  Insert an LLVM cast of the pointer to
  // uintptr_t first.
  SCEVHandle GEPVal = SE->getUnknown(
      getCastedVersionOf(Instruction::PtrToInt, GEP->getOperand(0)));

  gep_type_iterator GTI = gep_type_begin(GEP);

  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
       i != e; ++i, ++GTI) {
    // If this is a use of a recurrence that we can analyze, and it comes before
    // Op does in the GEP operand list, we will handle this when we process this
    // operand.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      unsigned Idx = cast<ConstantInt>(*i)->getZExtValue();
      uint64_t Offset = SL->getElementOffset(Idx);
      GEPVal = SE->getAddExpr(GEPVal,
                              SE->getIntegerSCEV(Offset, UIntPtrTy));
    } else {
      unsigned GEPOpiBits =
        (*i)->getType()->getPrimitiveSizeInBits();
      unsigned IntPtrBits = UIntPtrTy->getPrimitiveSizeInBits();
      Instruction::CastOps opcode = (GEPOpiBits < IntPtrBits ?
          Instruction::SExt : (GEPOpiBits > IntPtrBits ? Instruction::Trunc :
            Instruction::BitCast));
      Value *OpVal = getCastedVersionOf(opcode, *i);
      SCEVHandle Idx = SE->getSCEV(OpVal);

      uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
      if (TypeSize != 1)
        Idx = SE->getMulExpr(Idx,
                             SE->getConstant(ConstantInt::get(UIntPtrTy,
                                                              TypeSize)));
      GEPVal = SE->getAddExpr(GEPVal, Idx);
    }
  }

  SE->setSCEV(GEP, GEPVal);
  return GEPVal;
}
/// getSCEVStartAndStride - Compute the start and stride of this expression,
/// returning false if the expression is not a start/stride pair, or true if it
/// is.  The stride must be a loop invariant expression, but the start may be
/// a mix of loop invariant and loop variant expressions.
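/// For example (illustrative), the SCEV "%a + 4 + {0,+,8}<L>" yields
/// Start = "%a + 4" and Stride = 8, while "{%p,+,%n}<L>" yields Start = %p and
/// Stride = %n (a variable but loop-invariant stride).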
static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
                                  SCEVHandle &Start, SCEVHandle &Stride,
                                  ScalarEvolution *SE) {
  SCEVHandle TheAddRec = Start;   // Initialize to zero.

  // If the outer level is an AddExpr, the operands are all start values except
  // for a nested AddRecExpr.
  if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (SCEVAddRecExpr *AddRec =
             dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
        if (AddRec->getLoop() == L)
          TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
        else
          return false;  // Nested IV of some sort?
      } else {
        Start = SE->getAddExpr(Start, AE->getOperand(i));
      }
  } else if (isa<SCEVAddRecExpr>(SH)) {
    TheAddRec = SH;
  } else {
    return false;  // not analyzable.
  }

  SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
  if (!AddRec || AddRec->getLoop() != L) return false;

  // FIXME: Generalize to non-affine IV's.
  if (!AddRec->isAffine()) return false;

  Start = SE->getAddExpr(Start, AddRec->getOperand(0));

  if (!isa<SCEVConstant>(AddRec->getOperand(1)))
    DOUT << "[" << L->getHeader()->getName()
         << "] Variable stride: " << *AddRec << "\n";

  Stride = AddRec->getOperand(1);
  return true;
}
/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
/// and now we need to decide whether the user should use the preinc or post-inc
/// value.  If this user should use the post-inc version of the IV, return true.
///
/// Choosing wrong here can break dominance properties (if we choose to use the
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
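/// Illustrative example (no real test case implied): a use in the loop's exit
/// block is dominated by the latch, so it can safely read the post-incremented
/// value; a use inside the loop body must keep using the pre-incremented PHI.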
static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
                                       Loop *L, DominatorTree *DT, Pass *P,
                                      SmallVectorImpl<Instruction*> &DeadInsts){
  // If the user is in the loop, use the preinc value.
  if (L->contains(User->getParent())) return false;

  BasicBlock *LatchBlock = L->getLoopLatch();

  // Ok, the user is outside of the loop.  If it is dominated by the latch
  // block, use the post-inc value.
  if (DT->dominates(LatchBlock, User->getParent()))
    return true;

  // There is one case we have to be careful of: PHI nodes.  These little guys
  // can live in blocks that do not dominate the latch block, but (since their
  // uses occur in the predecessor block, not the block the PHI lives in) should
  // still use the post-inc value.  Check for this case now.
  PHINode *PN = dyn_cast<PHINode>(User);
  if (!PN) return false;  // not a phi, not dominated by latch block.

  // Look at all of the uses of IV by the PHI node.  If any use corresponds to
  // a block that is not dominated by the latch block, give up and use the
  // preincremented value.
  unsigned NumUses = 0;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      ++NumUses;
      if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
        return false;
    }

  // Okay, all uses of IV by PN are in predecessor blocks that really are
  // dominated by the latch block.  Split the critical edges and use the
  // post-incremented value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
      // Splitting the critical edge can reduce the number of entries in this
      // PHI.
      e = PN->getNumIncomingValues();
      if (--NumUses == 0) break;
    }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(User);

  return true;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
/// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true.  Otherwise, return false.
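/// For instance (illustrative), starting from the canonical IV this visits a
/// sext of it, then a GEP computed from the sext, and finally records the load
/// that addresses the GEP as an IVStrideUse, since the load itself is not a
/// reducible IV expression.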
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
                                      SmallPtrSet<Instruction*,16> &Processed) {
  if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
    return false;   // Void and FP expressions cannot be reduced.
  if (!Processed.insert(I))
    return true;    // Instruction already handled.

  // Get the symbolic expression for this instruction.
  SCEVHandle ISE = GetExpressionSCEV(I);
  if (isa<SCEVCouldNotCompute>(ISE)) return false;

  // Get the start and stride for this expression.
  SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
  SCEVHandle Stride = Start;
  if (!getSCEVStartAndStride(ISE, L, Start, Stride, SE))
    return false;  // Non-reducible symbolic expression, bail out.

  std::vector<Instruction *> IUsers;
  // Collect all I uses now because IVUseShouldUsePostIncValue may
  // invalidate use_iterator.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    IUsers.push_back(cast<Instruction>(*UI));

  for (unsigned iused_index = 0, iused_size = IUsers.size();
       iused_index != iused_size; ++iused_index) {

    Instruction *User = IUsers[iused_index];

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && Processed.count(User))
      continue;

    // Descend recursively, but not into PHI nodes outside the current loop.
    // It's important to see the entire expression outside the loop to get
    // choices that depend on addressing mode use right, although we won't
    // consider references outside the loop in all cases.
    // If User is already in Processed, we don't want to recurse into it again,
    // but do want to record a second reference in the same instruction.
    bool AddUserToIVUsers = false;
    if (LI->getLoopFor(User->getParent()) != L) {
      if (isa<PHINode>(User) || Processed.count(User) ||
          !AddUsersIfInteresting(User, L, Processed)) {
        DOUT << "FOUND USER in other loop: " << *User
             << "   OF SCEV: " << *ISE << "\n";
        AddUserToIVUsers = true;
      }
    } else if (Processed.count(User) ||
               !AddUsersIfInteresting(User, L, Processed)) {
      DOUT << "FOUND USER: " << *User
           << "   OF SCEV: " << *ISE << "\n";
      AddUserToIVUsers = true;
    }

    if (AddUserToIVUsers) {
      IVUsersOfOneStride &StrideUses = IVUsesByStride[Stride];
      if (StrideUses.Users.empty())     // First occurrence of this stride?
        StrideOrder.push_back(Stride);

      // Okay, we found a user that we cannot reduce.  Analyze the instruction
      // and decide what to do with it.  If we are a use inside of the loop, use
      // the value before incrementation, otherwise use it after incrementation.
      if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
        // The value used will be incremented by the stride more than we are
        // expecting, so subtract this off.
        SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
        StrideUses.addUser(NewStart, User, I);
        StrideUses.Users.back().isUseOfPostIncrementedValue = true;
        DOUT << "   USING POSTINC SCEV, START=" << *NewStart << "\n";
      } else {
        StrideUses.addUser(Start, User, I);
      }
    }
  }
  return true;
}
namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
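  /// A sketch of the partitioning (illustrative values): a use whose offset is
  /// "%a + 12" starts out with Base = "%a + 12" and Imm = 0; after the
  /// immediate-field analysis below it can become Base = "%a" and Imm = 12, so
  /// that the 12 is folded into the user's addressing mode.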
  struct VISIBILITY_HIDDEN BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use.  As the use is processed, information gets moved from this
    /// field to the Imm field (below).  BasedUser values are sorted by this
    /// field.
    SCEVHandle Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// new expression.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction.
    SCEVHandle Imm;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.Offset), Inst(IVSU.User),
        OperandValToReplace(IVSU.OperandValToReplace),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        SmallVectorImpl<Instruction*> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L);
    void dump() const;
  };
}

void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  cerr << "   Inst: " << *Inst;
}

Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L) {
  // Figure out where we *really* want to insert this code.  In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user, we'd rather pull it out as
  // many loops as possible.
  LoopInfo &LI = Rewriter.getLoopInfo();
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  // If there is no immediate value, skip the next part.
  if (Imm->isZero())
    return Rewriter.expandCodeFor(NewBase, BaseInsertPt);

  Value *Base = Rewriter.expandCodeFor(NewBase, BaseInsertPt);

  // If we are inserting the base and imm values in the same block, make sure to
  // adjust the IP position if insertion reused a result.
  if (IP == BaseInsertPt)
    IP = Rewriter.getInsertionPoint();

  // Always emit the immediate (if non-zero) into the same block as the user.
  SCEVHandle NewValSCEV = SE->getAddExpr(SE->getUnknown(Base), Imm);
  return Rewriter.expandCodeFor(NewValSCEV, IP);
}
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.  NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<Instruction*> &DeadInsts){
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users.  Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation.  This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block.  However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);
    // Adjust the type back to match the Inst. Note that we can't use InsertPt
    // here because the SCEVExpander may have inserted the instructions after
    // that point, in its efforts to avoid inserting redundant expressions.
    if (isa<PointerType>(OperandValToReplace->getType())) {
      NewVal = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
                                            NewVal,
                                            OperandValToReplace->getType());
    }
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
    DOUT << "      CHANGED: IMM =" << *Imm;
    DOUT << "  \tNEWBASE =" << *NewBase;
    DOUT << "  \tInst = " << *Inst;
    return;
  }

  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it.  Note that PHI nodes can
  // have multiple entries for the same predecessor.  We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI.  This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI.  We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case.
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths.  We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        BasicBlock *PHIPred = PN->getIncomingBlock(i);
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

          // Next step: move the basic block.  In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
            BasicBlock *NewBB = PN->getIncomingBlock(i);
            NewBB->moveBefore(PN->getParent());
          }

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
        }
      }

      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PN->getIncomingBlock(i)->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, Rewriter, InsertPt, L);

        // Adjust the type back to match the PHI. Note that we can't use
        // InsertPt here because the SCEVExpander may have inserted its
        // instructions after that point, in its efforts to avoid inserting
        // redundant expressions.
        if (isa<PointerType>(PN->getType())) {
          Code = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr,
                                              Code,
                                              PN->getType());
        }
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);

  DOUT << "    CHANGED: IMM =" << *Imm << "  Inst = " << *Inst;
}
/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
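/// For example (illustrative), on x86 a displacement such as "+24" or a global
/// base such as "@G" can typically be folded into "[%reg + 24]" or
/// "[@G + %reg]", whereas the TLI-less fallback below assumes only a
/// sign-extended 16-bit displacement fits, as on PPC.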
static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, UseTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
      if (TLI && CE->getOpcode() == Instruction::PtrToInt) {
        Constant *Op0 = CE->getOperand(0);
        if (GlobalValue *GV = dyn_cast<GlobalValue>(Op0)) {
          TargetLowering::AddrMode AM;
          AM.BaseGV = GV;
          AM.HasBaseReg = HasBaseReg;
          return TLI->isLegalAddressingMode(AM, UseTy);
        }
      }

  return false;
}
/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
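/// For example (illustrative), with Val = "%a + %n*%i" where "%n*%i" varies in
/// L, this leaves Val = "%a" and adds "%n*%i" into Imm, so the base emitted in
/// the preheader only depends on values available there.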
static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *UseTy = User->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    UseTy = SI->getOperand(0)->getType();

  if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      SCEVHandle NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, User, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveImmediateValues(TLI, User, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
      SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
      SCEVHandle NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, User, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8".  If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, UseTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, UseTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together.  This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
                             SCEVHandle Expr,
                             ScalarEvolution *SE) {
  if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}

// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything.  This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression.  The common expression
/// is *removed* from the Bases and returned.
static SCEVHandle
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use?  This is a very common case, so we handle it specially and
  // cheaply.
  SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  SCEVHandle Result = Zero;
  SCEVHandle FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's.  If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<SCEVHandle, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  std::vector<SCEVHandle> UniqueSubExprs;

  std::vector<SCEVHandle> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV).  We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the UseTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *UseTy = 0;
    if (isAddrUse) {
      UseTy = Uses[i].Inst->getType();
      if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
        UseTy = SI->getOperand(0)->getType();
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree.  We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], UseTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result.  Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<SCEVHandle, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop.  However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers).  Check this.
    for (unsigned i = 0; i < NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *UseTy = Uses[i].Inst->getType();
      if (StoreInst *SI = dyn_cast<StoreInst>(Uses[i].Inst))
        UseTy = SI->getOperand(0)->getType();
      if (!fitsInAddressMode(FreeResult, UseTy, TLI, Result != Zero)) {
        // FIXME:  could split up FreeResult into pieces here, some hoisted
        // and some not.  There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData.  This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<SCEVHandle, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does.  Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
/// ValidStride - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
                                     int64_t Scale,
                                     const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].Inst))
      AccessTy = SI->getOperand(0)->getType();
    else if (LoadInst *LI = dyn_cast<LoadInst>(UsersToProcess[i].Inst))
      AccessTy = LI->getType();
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// free.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return (!Ty1->canLosslesslyBitCastTo(Ty2) &&
          !(isa<PointerType>(Ty2) &&
            Ty1->canLosslesslyBitCastTo(UIntPtrTy)) &&
          !(isa<PointerType>(Ty1) &&
            Ty2->canLosslesslyBitCastTo(UIntPtrTy)));
}

/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg.  This allows the users of
/// this stride to be rewritten as prev iv * factor.  It returns 0 if no
/// reuse is possible.  Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode; a multiply (executed once) outside
/// the loop is better than another IV within.  Well, usually.
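/// For example (illustrative), if an IV with stride 4 already exists and the
/// current stride is 16, this returns 4 when the target can encode
/// "base + 4*reg" in its addressing mode, letting the stride-16 users be
/// rewritten in terms of the existing stride-4 IV.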
int64_t LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                            bool AllUsesAreAddresses,
                                            bool AllUsesAreOutsideLoop,
                                            const SCEVHandle &Stride,
                                            IVExpr &IV, const Type *Ty,
                                      const std::vector<BasedUser>& UsersToProcess) {
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidStride(HasBaseReg, Scale, UsersToProcess)))
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // FIXME: Only handle base == 0 for now.
          // Only reuse previous IV if it would not require a type conversion.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return Scale;
          }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is really right to substitute
    // an existing IV if we can.
    // Special case: the old IV is -1*x and this one is x.  Can treat this one
    // as postinc of the other.
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require a type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return -1;
              }
    }
  }
  return 0;
}

/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
  SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

// CollectIVUsers - Transform our list of users and offsets to a bit more
// complex table.  In this new vector, each 'BasedUser' contains 'Base', the
// base of the strided accesses, as well as the old information from Uses.  We
// progressively move information from the Base field to the Imm field, until
// we eventually have the full access expression to rewrite the use.
SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
                                              IVUsersOfOneStride &Uses,
                                              Loop *L,
                                              bool &AllUsesAreAddresses,
                                              bool &AllUsesAreOutsideLoop,
                                       std::vector<BasedUser> &UsersToProcess) {
  UsersToProcess.reserve(Uses.Users.size());
  for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i) {
    UsersToProcess.push_back(BasedUser(Uses.Users[i], SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases.  We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible.  Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  SCEVHandle CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.  We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV.  Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {

      // Addressing modes can be folded into loads and stores.  Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse.  Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  return CommonExprs;
}

/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV.  All of the users may have different starting values, and this
/// may not be the only stride (we know it is if isOnlyStride is true).
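/// A sketch of the emitted pattern when no existing IV can be reused
/// (illustrative names only):
///
///   preheader:
///     %iv.base = <common base expression>
///   header:
///     %iv = phi [ %iv.base, %preheader ], [ %iv.inc, %latch ]
///   latch:
///     %iv.inc = add %iv, <stride>
///
/// Each use is then rewritten as %iv (or %iv.inc for post-inc uses) plus its
/// remaining per-use base and immediate offset.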
1363 void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
1364 IVUsersOfOneStride &Uses,
1366 bool isOnlyStride) {
1367 // If all the users are moved to another stride, then there is nothing to do.
1368 if (Uses.Users.empty())
1371 // Keep track if every use in UsersToProcess is an address. If they all are,
1372 // we may be able to rewrite the entire collection of them in terms of a
1373 // smaller-stride IV.
1374 bool AllUsesAreAddresses = true;
1376 // Keep track if every use of a single stride is outside the loop. If so,
1377 // we want to be more aggressive about reusing a smaller-stride IV; a
1378 // multiply outside the loop is better than another IV inside. Well, usually.
1379 bool AllUsesAreOutsideLoop = true;
1381 // Transform our list of users and offsets to a bit more complex table. In
1382 // this new vector, each 'BasedUser' contains 'Base' the base of the
1383 // strided accessas well as the old information from Uses. We progressively
1384 // move information from the Base field to the Imm field, until we eventually
1385 // have the full access expression to rewrite the use.
1386 std::vector<BasedUser> UsersToProcess;
1387 SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1388 AllUsesAreOutsideLoop,
1391 // If we managed to find some expressions in common, we'll need to carry
1392 // their value in a register and add it in for each use. This will take up
1393 // a register operand, which potentially restricts what stride values are
1395 bool HaveCommonExprs = !CommonExprs->isZero();
1397 // If all uses are addresses, check if it is possible to reuse an IV with a
1398 // stride that is a factor of this stride. And that the multiple is a number
1399 // that can be encoded in the scale field of the target addressing mode. And
1400 // that we will have a valid instruction after this substition, including the
1401 // immediate field, if any.
1402 PHINode *NewPHI = NULL;
1404 IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
1405 SE->getIntegerSCEV(0, Type::Int32Ty),
1407 int64_t RewriteFactor = 0;
1408 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1409 AllUsesAreOutsideLoop,
1410 Stride, ReuseIV, CommonExprs->getType(),
1412 if (RewriteFactor != 0) {
1413 DOUT << "BASED ON IV of STRIDE " << *ReuseIV.Stride
1414 << " and BASE " << *ReuseIV.Base << " :\n";
1415 NewPHI = ReuseIV.PHI;
1416 IncV = ReuseIV.IncV;
1419 const Type *ReplacedTy = CommonExprs->getType();
1421 // Now that we know what we need to do, insert the PHI node itself.
1423 DOUT << "INSERTING IV of TYPE " << *ReplacedTy << " of STRIDE "
1424 << *Stride << " and BASE " << *CommonExprs << ": ";
1426 SCEVExpander Rewriter(*SE, *LI);
1427 SCEVExpander PreheaderRewriter(*SE, *LI);
1429 BasicBlock *Preheader = L->getLoopPreheader();
1430 Instruction *PreInsertPt = Preheader->getTerminator();
1431 Instruction *PhiInsertBefore = L->getHeader()->begin();
1433 BasicBlock *LatchBlock = L->getLoopLatch();
1436 // Emit the initial base value into the loop preheader.
1438 = PreheaderRewriter.expandCodeFor(CommonExprs, PreInsertPt);
1440 if (RewriteFactor == 0) {
1441 // Create a new Phi for this base, and stick it in the loop header.
1442 NewPHI = PHINode::Create(ReplacedTy, "iv.", PhiInsertBefore);
1445 // Add common base to the new Phi node.
1446 NewPHI->addIncoming(CommonBaseV, Preheader);
1448 // If the stride is negative, insert a sub instead of an add for the
1450 bool isNegative = isNonConstantNegative(Stride);
1451 SCEVHandle IncAmount = Stride;
1453 IncAmount = SE->getNegativeSCEV(Stride);
1455 // Insert the stride into the preheader.
1456 Value *StrideV = PreheaderRewriter.expandCodeFor(IncAmount, PreInsertPt);
1457 if (!isa<ConstantInt>(StrideV)) ++NumVariable;
1459 // Emit the increment of the base value before the terminator of the loop
1460 // latch block, and add it to the Phi node.
1461 SCEVHandle IncExp = SE->getUnknown(StrideV);
1463 IncExp = SE->getNegativeSCEV(IncExp);
1464 IncExp = SE->getAddExpr(SE->getUnknown(NewPHI), IncExp);
1466 IncV = Rewriter.expandCodeFor(IncExp, LatchBlock->getTerminator());
1467 IncV->setName(NewPHI->getName()+".inc");
1468 NewPHI->addIncoming(IncV, LatchBlock);
1470 // Remember this in case a later stride is multiple of this.
1471 IVsByStride[Stride].addIV(Stride, CommonExprs, NewPHI, IncV);
1473 DOUT << " IV=%" << NewPHI->getNameStr() << " INC=%" << IncV->getNameStr();
1475 Constant *C = dyn_cast<Constant>(CommonBaseV);
1477 (!C->isNullValue() &&
1478 !fitsInAddressMode(SE->getUnknown(CommonBaseV), ReplacedTy,
1480 // We want the common base emitted into the preheader! This is just
1481 // using cast as a copy so BitCast (no-op cast) is appropriate
1482 CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1483 "commonbase", PreInsertPt);
1487 // We want to emit code for users inside the loop first. To do this, we
1488 // rearrange BasedUser so that the entries at the end have
1489 // isUseOfPostIncrementedValue = false, because we pop off the end of the
1490 // vector (so we handle them first).
1491 std::partition(UsersToProcess.begin(), UsersToProcess.end(),
1492 PartitionByIsUseOfPostIncrementedValue);
1494 // Sort this by base, so that things with the same base are handled
1495 // together. By partitioning first and stable-sorting later, we are
1496 // guaranteed that within each base we will pop off users from within the
1497 // loop before users outside of the loop with a particular base.
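// For example (illustrative): a worklist whose bases come out as A, B, A, C, A
// is compacted below so that all three A users become adjacent, followed by
// the remaining bases grouped together (their relative order is not
// significant); each group is then consumed from the back of the vector.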
1499 // We would like to use stable_sort here, but we can't. The problem is that
1500 // SCEVHandles don't have a deterministic ordering w.r.t. each other, so
1501 // we don't have anything to do a '<' comparison on. Because we think the
1502 // number of uses is small, do a horrible bubble sort which just relies on
1504 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1505 // Get a base value.
1506 SCEVHandle Base = UsersToProcess[i].Base;
1508 // Compact everything with this base to be consecutive with this one.
1509 for (unsigned j = i+1; j != e; ++j) {
1510 if (UsersToProcess[j].Base == Base) {
1511 std::swap(UsersToProcess[i+1], UsersToProcess[j]);
1517 // Process all the users now. This outer loop handles all bases, the inner
1518 // loop handles all users of a particular base.
1519 while (!UsersToProcess.empty()) {
1520 SCEVHandle Base = UsersToProcess.back().Base;
1522 // Emit the code for Base into the preheader.
1523 Value *BaseV = PreheaderRewriter.expandCodeFor(Base, PreInsertPt);
1525 DOUT << " INSERTING code for BASE = " << *Base << ":";
1526 if (BaseV->hasName())
1527 DOUT << " Result value name = %" << BaseV->getNameStr();
1530 // If BaseV is a constant other than 0, make sure that it gets inserted into
1531 // the preheader, instead of being forward substituted into the uses. We do
1532 // this by forcing a BitCast (noop cast) to be inserted into the preheader
1534 if (Constant *C = dyn_cast<Constant>(BaseV)) {
1535 if (!C->isNullValue() && !fitsInAddressMode(Base, ReplacedTy,
1537 // We want this constant emitted into the preheader! This is just
1538 // using cast as a copy so BitCast (no-op cast) is appropriate
1539 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1544 // Emit the code to add the immediate offset to the Phi value, just before
1545 // the instructions that we identified as using this stride and base.
1547 // FIXME: Use emitted users to emit other users.
1548 BasedUser &User = UsersToProcess.back();
1550 // If this instruction wants to use the post-incremented value, move it
1551 // after the post-inc and use its value instead of the PHI.
1552 Value *RewriteOp = NewPHI;
1553 if (User.isUseOfPostIncrementedValue) {
1556 // If this user is in the loop, make sure it is the last thing in the
1557 // loop to ensure it is dominated by the increment.
1558 if (L->contains(User.Inst->getParent()))
1559 User.Inst->moveBefore(LatchBlock->getTerminator());
1561 if (RewriteOp->getType() != ReplacedTy) {
1562 Instruction::CastOps opcode = Instruction::Trunc;
1563 if (ReplacedTy->getPrimitiveSizeInBits() ==
1564 RewriteOp->getType()->getPrimitiveSizeInBits())
1565 opcode = Instruction::BitCast;
1566 RewriteOp = SCEVExpander::InsertCastOfTo(opcode, RewriteOp, ReplacedTy);
1569 SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);
1571 // If we had to insert new instructions for RewriteOp, we have to
1572 // consider that they may not have been able to end up immediately
1573 // next to RewriteOp, because non-PHI instructions may never precede
1574 // PHI instructions in a block. In this case, remember where the last
1575 // instruction was inserted so that if we're replacing a different
1576 // PHI node, we can use the later point to expand the final
1578 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1579 if (RewriteOp == NewPHI) NewBasePt = 0;
1581 // Clear the SCEVExpander's expression map so that we are guaranteed
1582 // to have the code emitted where we expect it.
1585 // If we are reusing the iv, then it must be multiplied by a constant
1586 // factor to take advantage of the addressing mode scale component.
1587 if (RewriteFactor != 0) {
1588 RewriteExpr = SE->getMulExpr(SE->getIntegerSCEV(RewriteFactor,
1589 RewriteExpr->getType()),
1592 // The common base is emitted in the loop preheader. But since we
1593 // are reusing an IV, it has not been used to initialize the PHI node.
1594 // Add it to the expression used to rewrite the uses.
1595 if (!isa<ConstantInt>(CommonBaseV) ||
1596 !cast<ConstantInt>(CommonBaseV)->isZero())
1597 RewriteExpr = SE->getAddExpr(RewriteExpr,
1598 SE->getUnknown(CommonBaseV));
1599 // If we're reusing an IV with a nonzero base (currently this happens
1600 // only when all reuses are outside the loop), subtract out that base here.
1601 // This is the reverse of the above; the base HAS been used to initialize
1602 // the PHI node but we don't want it here.
1603 // (If the RewriteFactor is negative, we're effectively negating the
1604 // old IV in this use, so we add the base instead of subtract.)
1605 if (!ReuseIV.Base->isZero()) {
1606 if (RewriteFactor < 0)
1607 RewriteExpr = SE->getAddExpr(RewriteExpr, ReuseIV.Base);
1609 RewriteExpr = SE->getMinusSCEV(RewriteExpr, ReuseIV.Base);
1613 // Now that we know what we need to do, insert code before User for the
1614 // immediate and any loop-variant expressions.
1615 if (!isa<ConstantInt>(BaseV) || !cast<ConstantInt>(BaseV)->isZero())
1616 // Add BaseV to the PHI value if needed.
1617 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
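// RewriteExpr now describes this use's value in terms of the (possibly reused
// and scaled) IV, any common base, and this use's own immediate offset; the
// user is rewritten below to compute its operand from that expression.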
1619 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1623 // Mark old value we replaced as possibly dead, so that it is eliminated
1624 // if we just replaced the last use of that value.
1625 DeadInsts.push_back(cast<Instruction>(User.OperandValToReplace));
1627 UsersToProcess.pop_back();
1630 // If there are any more users to process with the same base, process them
1631 // now. We sorted by base above, so we just have to check the last elt.
1632 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1633 // TODO: Next, find out which base index is the most common, pull it out.
1636 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1637 // different starting values, into different PHIs.
1640 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1641 /// set the IV user and stride information and return true; otherwise return false.
1643 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
1644 const SCEVHandle *&CondStride) {
1645 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e && !CondUse;
1647 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
1648 IVUsesByStride.find(StrideOrder[Stride]);
1649 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
1651 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
1652 E = SI->second.Users.end(); UI != E; ++UI)
1653 if (UI->User == Cond) {
1654 // NOTE: we could handle setcc instructions with multiple uses here, but
1655 // InstCombine does it as well for simple uses, and it's not clear that it
1656 // occurs often enough in real life to be worth handling.
1658 CondStride = &SI->first;
1666 // Constant strides come first and are sorted by their absolute
1667 // values. If the absolute values are the same, then positive strides come first.
1669 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
1670 struct StrideCompare {
1671 bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
1672 SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
1673 SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
1675 int64_t LV = LHSC->getValue()->getSExtValue();
1676 int64_t RV = RHSC->getValue()->getSExtValue();
1677 uint64_t ALV = (LV < 0) ? -LV : LV;
1678 uint64_t ARV = (RV < 0) ? -RV : RV;
1684 return (LHSC && !RHSC);
1689 /// ChangeCompareStride - If a loop termination compare instruction is the
1690 /// only use of its stride, and the comparison is against a constant value,
1691 /// try to eliminate the stride by moving the compare instruction to another
1692 /// stride and changing its constant operand accordingly. e.g.
1698 /// if (v2 < 10) goto loop
1703 /// if (v1 < 30) goto loop
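/// (In the example above, v1's stride is presumably three times v2's, so the
/// compare is moved onto v1 with its constant scaled from 10 to 30, and v2's
/// stride can then be eliminated.)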
1704 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
1705 IVStrideUse* &CondUse,
1706 const SCEVHandle* &CondStride) {
1707 if (StrideOrder.size() < 2 ||
1708 IVUsesByStride[*CondStride].Users.size() != 1)
1710 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
1711 if (!SC) return Cond;
1712 ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1));
1713 if (!C) return Cond;
1715 ICmpInst::Predicate Predicate = Cond->getPredicate();
1716 int64_t CmpSSInt = SC->getValue()->getSExtValue();
1717 int64_t CmpVal = C->getValue().getSExtValue();
1718 unsigned BitWidth = C->getValue().getBitWidth();
1719 uint64_t SignBit = 1ULL << (BitWidth-1);
1720 const Type *CmpTy = C->getType();
1721 const Type *NewCmpTy = NULL;
1722 unsigned TyBits = CmpTy->getPrimitiveSizeInBits();
1723 unsigned NewTyBits = 0;
1724 int64_t NewCmpVal = CmpVal;
1725 SCEVHandle *NewStride = NULL;
1726 Value *NewIncV = NULL;
1729 // Check the stride constant and the comparison constant signs to detect
1731 if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
1734 // Look for a suitable stride / iv as replacement.
1735 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
1736 for (unsigned i = 0, e = StrideOrder.size(); i != e; ++i) {
1737 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
1738 IVUsesByStride.find(StrideOrder[i]);
1739 if (!isa<SCEVConstant>(SI->first))
1741 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1742 if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
1745 Scale = SSInt / CmpSSInt;
1746 NewCmpVal = CmpVal * Scale;
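// e.g. (hypothetical values): CmpSSInt == 1, SSInt == 4, CmpVal == 10
// gives Scale == 4 and NewCmpVal == 40.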
1747 APInt Mul = APInt(BitWidth, NewCmpVal);
1748 // Check for overflow.
1749 if (Mul.getSExtValue() != NewCmpVal) {
1754 // Watch out for overflow.
1755 if (ICmpInst::isSignedPredicate(Predicate) &&
1756 (CmpVal & SignBit) != (NewCmpVal & SignBit))
1759 if (NewCmpVal != CmpVal) {
1760 // Pick the best iv to use, trying to avoid a cast.
1762 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
1763 E = SI->second.Users.end(); UI != E; ++UI) {
1764 NewIncV = UI->OperandValToReplace;
1765 if (NewIncV->getType() == CmpTy)
1773 NewCmpTy = NewIncV->getType();
1774 NewTyBits = isa<PointerType>(NewCmpTy)
1775 ? UIntPtrTy->getPrimitiveSizeInBits()
1776 : NewCmpTy->getPrimitiveSizeInBits();
1777 if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
1778 // Check if it is possible to rewrite it using
1779 // an iv / stride of a smaller integer type.
1780 bool TruncOk = false;
1781 if (NewCmpTy->isInteger()) {
1782 unsigned Bits = NewTyBits;
1783 if (ICmpInst::isSignedPredicate(Predicate))
1785 uint64_t Mask = (1ULL << Bits) - 1;
1786 if (((uint64_t)NewCmpVal & Mask) == (uint64_t)NewCmpVal)
1795 // Don't rewrite if the use offset is non-constant and the new type
1796 // has a different width.
1797 // FIXME: too conservative?
1798 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset)) {
1803 bool AllUsesAreAddresses = true;
1804 bool AllUsesAreOutsideLoop = true;
1805 std::vector<BasedUser> UsersToProcess;
1806 SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
1807 AllUsesAreAddresses,
1808 AllUsesAreOutsideLoop,
1810 // Avoid rewriting the compare instruction with an iv of new stride
1811 // if it's likely the new stride uses will be rewritten using the
1812 if (AllUsesAreAddresses &&
1813 ValidStride(!CommonExprs->isZero(), Scale, UsersToProcess)) {
1818 // If scale is negative, use the swapped predicate unless it's testing for equality.
1820 if (Scale < 0 && !Cond->isEquality())
1821 Predicate = ICmpInst::getSwappedPredicate(Predicate);
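// e.g. (hypothetical values): with Scale == -2 and CmpVal == 10, a
// non-equality 'slt 10' test becomes an 'sgt -20' test against the new IV.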
1823 NewStride = &StrideOrder[i];
1828 // Forgo this transformation if the increment happens to be
1829 // unfortunately positioned after the condition, and the condition
1830 // has multiple uses which prevent it from being moved immediately
1831 // before the branch. See
1832 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
1833 // for an example of this situation.
1834 if (!Cond->hasOneUse()) {
1835 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
1841 if (NewCmpVal != CmpVal) {
1842 // Create a new compare instruction using new stride / iv.
1843 ICmpInst *OldCond = Cond;
1845 if (!isa<PointerType>(NewCmpTy))
1846 RHS = ConstantInt::get(NewCmpTy, NewCmpVal);
1848 RHS = ConstantInt::get(UIntPtrTy, NewCmpVal);
1849 RHS = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr, RHS, NewCmpTy);
1851 // Insert new compare instruction.
1852 Cond = new ICmpInst(Predicate, NewIncV, RHS,
1853 L->getHeader()->getName() + ".termcond",
1856 // Remove the old compare instruction. The old indvar is probably dead too.
1857 DeadInsts.push_back(cast<Instruction>(CondUse->OperandValToReplace));
1858 SE->deleteValueFromRecords(OldCond);
1859 OldCond->replaceAllUsesWith(Cond);
1860 OldCond->eraseFromParent();
1862 IVUsesByStride[*CondStride].Users.pop_back();
1863 SCEVHandle NewOffset = TyBits == NewTyBits
1864 ? SE->getMulExpr(CondUse->Offset,
1865 SE->getConstant(ConstantInt::get(CmpTy, Scale)))
1866 : SE->getConstant(ConstantInt::get(NewCmpTy,
1867 cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
1868 IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewIncV);
1869 CondUse = &IVUsesByStride[*NewStride].Users.back();
1870 CondStride = NewStride;
1877 /// OptimizeSMax - Rewrite the loop's terminating condition if it uses
1878 /// an smax computation.
1880 /// This is a narrow solution to a specific, but acute, problem. For loops
1886 /// } while (++i < n);
1888 /// where the comparison is signed, the trip count isn't just 'n', because
1889 /// 'n' could be negative. And unfortunately this can come up even for loops
1890 /// where the user didn't use a C do-while loop. For example, seemingly
1891 /// well-behaved top-test loops will commonly be lowered like this:
1897 /// } while (++i < n);
1900 /// and then it's possible for subsequent optimization to obscure the if
1901 /// test in such a way that indvars can't find it.
1903 /// When indvars can't find the if test in loops like this, it creates a
1904 /// signed-max expression, which allows it to give the loop a canonical
1905 /// induction variable:
1908 /// smax = n < 1 ? 1 : n;
1911 /// } while (++i != smax);
1913 /// Canonical induction variables are necessary because the loop passes
1914 /// are designed around them. The most obvious example of this is the
1915 /// LoopInfo analysis, which doesn't remember trip count values. It
1916 /// expects to be able to rediscover the trip count each time it is
1917 /// needed, and it does this using a simple analysis that only succeeds if
1918 /// the loop has a canonical induction variable.
1920 /// However, when it comes time to generate code, the maximum operation
1921 /// can be quite costly, especially if it's inside of an outer loop.
1923 /// This function solves this problem by detecting this type of loop and
1924 /// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
1925 /// the instructions for the maximum computation.
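/// For the example above, this turns the 'while (++i != smax)' exit test back
/// into 'while (++i < n)'; the smax computation then becomes dead and is
/// removed.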
1927 ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
1928 IVStrideUse* &CondUse) {
1929 // Check that the loop matches the pattern we're looking for.
1930 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
1931 Cond->getPredicate() != CmpInst::ICMP_NE)
1934 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
1935 if (!Sel || !Sel->hasOneUse()) return Cond;
1937 SCEVHandle IterationCount = SE->getIterationCount(L);
1938 if (isa<SCEVCouldNotCompute>(IterationCount))
1940 SCEVHandle One = SE->getIntegerSCEV(1, IterationCount->getType());
1942 // Adjust for an annoying getIterationCount quirk.
1943 IterationCount = SE->getAddExpr(IterationCount, One);
1945 // Check for a max calculation that matches the pattern.
1946 SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
1947 if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;
1949 SCEVHandle SMaxLHS = SMax->getOperand(0);
1950 SCEVHandle SMaxRHS = SMax->getOperand(1);
1951 if (!SMaxLHS || SMaxLHS != One) return Cond;
1953 // Check the relevant induction variable for conformance to the pattern.
1955 SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
1956 SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
1957 if (!AR || !AR->isAffine() ||
1958 AR->getStart() != One ||
1959 AR->getStepRecurrence(*SE) != One)
1962 // Check the right operand of the select, and remember it, as it will
1963 // be used in the new comparison instruction.
1965 if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
1966 NewRHS = Sel->getOperand(1);
1967 else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
1968 NewRHS = Sel->getOperand(2);
1969 if (!NewRHS) return Cond;
1971 // Ok, everything looks ok to change the condition into an SLT or SGE and
1972 // delete the max calculation.
1974 new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
1977 Cond->getOperand(0), NewRHS, "scmp", Cond);
1979 // Delete the max calculation instructions.
1980 SE->deleteValueFromRecords(Cond);
1981 Cond->replaceAllUsesWith(NewCond);
1982 Cond->eraseFromParent();
1983 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
1984 SE->deleteValueFromRecords(Sel);
1985 Sel->eraseFromParent();
1986 if (Cmp->use_empty()) {
1987 SE->deleteValueFromRecords(Cmp);
1988 Cmp->eraseFromParent();
1990 CondUse->User = NewCond;
1994 /// OptimizeShadowIV - If IV is used in an int-to-float cast
1995 /// inside the loop then try to eliminate the cast operation.
1996 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
1998 SCEVHandle IterationCount = SE->getIterationCount(L);
1999 if (isa<SCEVCouldNotCompute>(IterationCount))
2002 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e;
2004 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2005 IVUsesByStride.find(StrideOrder[Stride]);
2006 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2007 if (!isa<SCEVConstant>(SI->first))
2010 for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
2011 E = SI->second.Users.end(); UI != E; /* empty */) {
2012 std::vector<IVStrideUse>::iterator CandidateUI = UI;
2014 Instruction *ShadowUse = CandidateUI->User;
2015 const Type *DestTy = NULL;
2017 /* If the shadow use is an int->float cast then insert a second IV
2018 to eliminate this cast.
2020 for (unsigned i = 0; i < n; ++i)
2026 for (unsigned i = 0; i < n; ++i, ++d)
2029 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->User))
2030 DestTy = UCast->getDestTy();
2031 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->User))
2032 DestTy = SCast->getDestTy();
2033 if (!DestTy) continue;
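// Illustratively, this transformation turns
//   for (unsigned i = 0; i < n; ++i) use((double)i);
// into
//   double d = 0.0;
//   for (unsigned i = 0; i < n; ++i, ++d) use(d);
// so the int-to-fp cast disappears from the loop body.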
2036 /* If the target does not support DestTy natively then do not apply
2037 this transformation. */
2038 MVT DVT = TLI->getValueType(DestTy);
2039 if (!TLI->isTypeLegal(DVT)) continue;
2042 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2044 if (PH->getNumIncomingValues() != 2) continue;
2046 const Type *SrcTy = PH->getType();
2047 int Mantissa = DestTy->getFPMantissaWidth();
2048 if (Mantissa == -1) continue;
2049 if ((int)TD->getTypeSizeInBits(SrcTy) > Mantissa)
2052 unsigned Entry, Latch;
2053 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2061 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2062 if (!Init) continue;
2063 ConstantFP *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2065 BinaryOperator *Incr =
2066 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2067 if (!Incr) continue;
2068 if (Incr->getOpcode() != Instruction::Add
2069 && Incr->getOpcode() != Instruction::Sub)
2072 /* Initialize the new IV; 'double d = 0.0' in the above example. */
2073 ConstantInt *C = NULL;
2074 if (Incr->getOperand(0) == PH)
2075 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2076 else if (Incr->getOperand(1) == PH)
2077 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2083 /* Add new PHINode. */
2084 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2086 /* Create the new increment; '++d' in the above example. */
2087 ConstantFP *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2088 BinaryOperator *NewIncr =
2089 BinaryOperator::Create(Incr->getOpcode(),
2090 NewPH, CFP, "IV.S.next.", Incr);
2092 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2093 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
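// The shadow IV now has the shape (names illustrative):
//   IV.S.      = phi [ NewInit, entry block ], [ IV.S.next., latch block ]
//   IV.S.next. = add/sub IV.S., CFP   ; same opcode as the integer increment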
2095 /* Remove cast operation */
2096 SE->deleteValueFromRecords(ShadowUse);
2097 ShadowUse->replaceAllUsesWith(NewPH);
2098 ShadowUse->eraseFromParent();
2099 SI->second.Users.erase(CandidateUI);
2106 // OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2107 // uses in the loop, look to see if we can eliminate some, in favor of using
2108 // common indvars for the different uses.
2109 void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2110 // TODO: implement optzns here.
2112 OptimizeShadowIV(L);
2114 // Finally, get the terminating condition for the loop if possible. If we
2115 // can, we want to change it to use a post-incremented version of its
2116 // induction variable, to allow coalescing the live ranges for the IV into
2117 // one register value.
2118 PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
2119 BasicBlock *Preheader = L->getLoopPreheader();
2120 BasicBlock *LatchBlock =
2121 SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
2122 BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
2123 if (!TermBr || TermBr->isUnconditional() ||
2124 !isa<ICmpInst>(TermBr->getCondition()))
2126 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2128 // Search IVUsesByStride to find Cond's IVUse if there is one.
2129 IVStrideUse *CondUse = 0;
2130 const SCEVHandle *CondStride = 0;
2132 if (!FindIVUserForCond(Cond, CondUse, CondStride))
2133 return; // setcc doesn't use the IV.
2135 // If the trip count is computed in terms of an smax (due to ScalarEvolution
2136 // being unable to find a sufficient guard, for example), change the loop
2137 // comparison to use SLT instead of NE.
2138 Cond = OptimizeSMax(L, Cond, CondUse);
2140 // If possible, change stride and operands of the compare instruction to
2141 // eliminate one stride.
2142 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2144 // It's possible for the setcc instruction to be anywhere in the loop, and
2145 // possible for it to have multiple users. If it is not immediately before
2146 // the latch block branch, move it.
2147 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2148 if (Cond->hasOneUse()) { // Condition has a single use, just move it.
2149 Cond->moveBefore(TermBr);
2151 // Otherwise, clone the terminating condition and insert it into the loop end.
2152 Cond = cast<ICmpInst>(Cond->clone());
2153 Cond->setName(L->getHeader()->getName() + ".termcond");
2154 LatchBlock->getInstList().insert(TermBr, Cond);
2156 // Clone the IVUse, as the old use still exists!
2157 IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
2158 CondUse->OperandValToReplace);
2159 CondUse = &IVUsesByStride[*CondStride].Users.back();
2163 // If we get to here, we know that we can transform the setcc instruction to
2164 // use the post-incremented version of the IV, allowing us to coalesce the
2165 // live ranges for the IV correctly.
2166 CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
2167 CondUse->isUseOfPostIncrementedValue = true;
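// e.g. (illustrative): an exit test written against the value {0,+,4} is
// re-expressed as the post-incremented IV minus one stride, which computes
// the same value but keeps only the incremented IV live across the latch.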
2171 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2173 LI = &getAnalysis<LoopInfo>();
2174 DT = &getAnalysis<DominatorTree>();
2175 SE = &getAnalysis<ScalarEvolution>();
2176 TD = &getAnalysis<TargetData>();
2177 UIntPtrTy = TD->getIntPtrType();
2180 // Find all uses of induction variables in this loop, and categorize
2181 // them by stride. Start by finding all of the PHI nodes in the header for
2182 // this loop. If they are induction variables, inspect their uses.
2183 SmallPtrSet<Instruction*,16> Processed; // Don't reprocess instructions.
2184 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
2185 AddUsersIfInteresting(I, L, Processed);
2187 if (!IVUsesByStride.empty()) {
2188 // Optimize induction variables. Some indvar uses can be transformed to use
2189 // strides that will be needed for other purposes. A common example of this
2190 // is the exit test for the loop, which can often be rewritten to use the
2191 // computation of some other indvar to decide when to terminate the loop.
2194 // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
2195 // doing computation in byte values, promote to 32-bit values if safe.
2197 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
2198 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2199 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2200 // Need to be careful that IV's are all the same type. Only works for
2201 // intptr_t indvars.
2203 // If we only have one stride, we can more aggressively eliminate some
2205 bool HasOneStride = IVUsesByStride.size() == 1;
2208 DOUT << "\nLSR on ";
2212 // IVsByStride keeps IVs for one particular loop.
2213 assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2215 // Sort the StrideOrder so we process larger strides first.
2216 std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
2218 // Note: this processes each stride/type pair individually. All users
2219 // passed into StrengthReduceStridedIVUsers have the same type AND stride.
2220 // Also, note that we iterate over IVUsesByStride indirectly by using
2221 // StrideOrder. This extra layer of indirection makes the ordering of
2222 // strides deterministic - not dependent on map order.
2223 for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
2224 std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
2225 IVUsesByStride.find(StrideOrder[Stride]);
2226 assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
2227 StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
2231 // We're done analyzing this loop; release all the state we built up for it.
2232 CastedPointers.clear();
2233 IVUsesByStride.clear();
2234 IVsByStride.clear();
2235 StrideOrder.clear();
2237 // Clean up after ourselves
2238 if (!DeadInsts.empty()) {
2239 DeleteTriviallyDeadInstructions();
2241 BasicBlock::iterator I = L->getHeader()->begin();
2242 while (PHINode *PN = dyn_cast<PHINode>(I++)) {
2243 // At this point, we know that we have killed one or more IV users.
2244 // It is worth checking to see if the canonical indvar is also
2245 // dead, so that we can remove it as well.
2247 // We can remove a PHI if it is on a cycle in the def-use graph
2248 // where each node in the cycle has degree one, i.e. only one use,
2249 // and is an instruction with no side effects.
2251 // FIXME: this needs to eliminate an induction variable even if it's being
2252 // compared against some value to decide loop termination.
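// For example (illustrative IR): a canonical indvar whose only remaining user
// is its own increment, which in turn is only used by the phi,
//   %iv      = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add i32 %iv, 1
// forms exactly such a cycle and is deleted here.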
2253 if (!PN->hasOneUse())
2256 SmallPtrSet<PHINode *, 4> PHIs;
2257 for (Instruction *J = dyn_cast<Instruction>(*PN->use_begin());
2258 J && J->hasOneUse() && !J->mayWriteToMemory();
2259 J = dyn_cast<Instruction>(*J->use_begin())) {
2260 // If we find the original PHI, we've discovered a cycle.
2262 // Break the cycle and mark the PHI for deletion.
2263 SE->deleteValueFromRecords(PN);
2264 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
2265 DeadInsts.push_back(PN);
2269 // If we find a PHI more than once, we're on a cycle that
2270 // won't prove fruitful.
2271 if (isa<PHINode>(J) && !PHIs.insert(cast<PHINode>(J)))
2275 DeleteTriviallyDeadInstructions();