1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the scalar evolution expander,
11 // which is used to generate the code corresponding to a given scalar evolution
14 //===----------------------------------------------------------------------===//
16 #include "llvm/Analysis/ScalarEvolutionExpander.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Dominators.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/LLVMContext.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/raw_ostream.h"
31 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
32 /// reusing an existing cast if a suitable one exists, moving an existing
33 /// cast if a suitable one exists but isn't in the right place, or
34 /// creating a new one.
35 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
36 Instruction::CastOps Op,
37 BasicBlock::iterator IP) {
38 // This function must be called with the builder having a valid insertion
39 // point. It doesn't need to be the actual IP where the uses of the returned
40 // cast will be added, but it must dominate such IP.
41 // We use this precondition to produce a cast that will dominate all its
42 // uses. In particular, this is crucial for the case where the builder's
43 // insertion point *is* the point where we were asked to put the cast.
44 // Since we don't know the builder's insertion point is actually
45 // where the uses will be added (only that it dominates it), we are
46 // not allowed to move it.
47 BasicBlock::iterator BIP = Builder.GetInsertPoint();
49 Instruction *Ret = nullptr;
51 // Check to see if there is already a cast!
52 for (User *U : V->users())
53 if (U->getType() == Ty)
54 if (CastInst *CI = dyn_cast<CastInst>(U))
55 if (CI->getOpcode() == Op) {
56 // If the cast isn't where we want it, create a new cast at IP.
57 // Likewise, do not reuse a cast at BIP because it must dominate
58 // instructions that might be inserted before BIP.
59 if (BasicBlock::iterator(CI) != IP || BIP == IP) {
60 // Create a new cast, and leave the old cast in place in case
61 // it is being used as an insert point. Clear its operand
62 // so that it doesn't hold anything live.
63 Ret = CastInst::Create(Op, V, Ty, "", IP);
65 CI->replaceAllUsesWith(Ret);
66 CI->setOperand(0, UndefValue::get(V->getType()));
75 Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
77 // We assert at the end of the function since IP might point to an
78 // instruction with different dominance properties than a cast
79 // (an invoke for example) and not dominate BIP (but the cast does).
80 assert(SE.DT->dominates(Ret, BIP));
82 rememberInstruction(Ret);
86 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
87 /// which must be possible with a noop cast, doing what we can to share
89 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
90 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
91 assert((Op == Instruction::BitCast ||
92 Op == Instruction::PtrToInt ||
93 Op == Instruction::IntToPtr) &&
94 "InsertNoopCastOfTo cannot perform non-noop casts!");
95 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
96 "InsertNoopCastOfTo cannot change sizes!");
98 // Short-circuit unnecessary bitcasts.
99 if (Op == Instruction::BitCast) {
100 if (V->getType() == Ty)
102 if (CastInst *CI = dyn_cast<CastInst>(V)) {
103 if (CI->getOperand(0)->getType() == Ty)
104 return CI->getOperand(0);
107 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
108 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
109 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
110 if (CastInst *CI = dyn_cast<CastInst>(V))
111 if ((CI->getOpcode() == Instruction::PtrToInt ||
112 CI->getOpcode() == Instruction::IntToPtr) &&
113 SE.getTypeSizeInBits(CI->getType()) ==
114 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
115 return CI->getOperand(0);
116 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
117 if ((CE->getOpcode() == Instruction::PtrToInt ||
118 CE->getOpcode() == Instruction::IntToPtr) &&
119 SE.getTypeSizeInBits(CE->getType()) ==
120 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
121 return CE->getOperand(0);
124 // Fold a cast of a constant.
125 if (Constant *C = dyn_cast<Constant>(V))
126 return ConstantExpr::getCast(Op, C, Ty);
128 // Cast the argument at the beginning of the entry block, after
129 // any bitcasts of other arguments.
130 if (Argument *A = dyn_cast<Argument>(V)) {
131 BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
132 while ((isa<BitCastInst>(IP) &&
133 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
134 cast<BitCastInst>(IP)->getOperand(0) != A) ||
135 isa<DbgInfoIntrinsic>(IP) ||
136 isa<LandingPadInst>(IP))
138 return ReuseOrCreateCast(A, Ty, Op, IP);
141 // Cast the instruction immediately after the instruction.
142 Instruction *I = cast<Instruction>(V);
143 BasicBlock::iterator IP = I; ++IP;
144 if (InvokeInst *II = dyn_cast<InvokeInst>(I))
145 IP = II->getNormalDest()->begin();
146 while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
148 return ReuseOrCreateCast(I, Ty, Op, IP);
151 /// InsertBinop - Insert the specified binary operator, doing a small amount
152 /// of work to avoid inserting an obviously redundant operation.
153 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
154 Value *LHS, Value *RHS) {
155 // Fold a binop with constant operands.
156 if (Constant *CLHS = dyn_cast<Constant>(LHS))
157 if (Constant *CRHS = dyn_cast<Constant>(RHS))
158 return ConstantExpr::get(Opcode, CLHS, CRHS);
160 // Do a quick scan to see if we have this binop nearby. If so, reuse it.
161 unsigned ScanLimit = 6;
162 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
163 // Scanning starts from the last instruction before the insertion point.
164 BasicBlock::iterator IP = Builder.GetInsertPoint();
165 if (IP != BlockBegin) {
167 for (; ScanLimit; --IP, --ScanLimit) {
168 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
170 if (isa<DbgInfoIntrinsic>(IP))
172 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
173 IP->getOperand(1) == RHS)
175 if (IP == BlockBegin) break;
179 // Save the original insertion point so we can restore it when we're done.
180 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
181 BuilderType::InsertPointGuard Guard(Builder);
183 // Move the insertion point out of as many loops as we can.
184 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
185 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
186 BasicBlock *Preheader = L->getLoopPreheader();
187 if (!Preheader) break;
189 // Ok, move up a level.
190 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
193 // If we haven't found this binop, insert it.
194 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
195 BO->setDebugLoc(Loc);
196 rememberInstruction(BO);
201 /// FactorOutConstant - Test if S is divisible by Factor, using signed
202 /// division. If so, update S with Factor divided out and return true.
203 /// S need not be evenly divisible if a reasonable remainder can be
205 /// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
206 /// unnecessary; in its place, just signed-divide Ops[i] by the scale and
207 /// check to see if the divide was folded.
208 static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
209 const SCEV *Factor, ScalarEvolution &SE,
210 const DataLayout &DL) {
211 // Everything is divisible by one.
217 S = SE.getConstant(S->getType(), 1);
221 // For a Constant, check for a multiple of the given factor.
222 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
226 // Check for divisibility.
227 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
229 ConstantInt::get(SE.getContext(),
230 C->getValue()->getValue().sdiv(
231 FC->getValue()->getValue()));
232 // If the quotient is zero and the remainder is non-zero, reject
233 // the value at this scale. It will be considered for subsequent
236 const SCEV *Div = SE.getConstant(CI);
239 SE.getAddExpr(Remainder,
240 SE.getConstant(C->getValue()->getValue().srem(
241 FC->getValue()->getValue())));
247 // In a Mul, check if there is a constant operand which is a multiple
248 // of the given factor.
249 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
250 // Size is known, check if there is a constant operand which is a multiple
251 // of the given factor. If so, we can factor it.
252 const SCEVConstant *FC = cast<SCEVConstant>(Factor);
253 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
254 if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
255 SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
256 NewMulOps[0] = SE.getConstant(
257 C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
258 S = SE.getMulExpr(NewMulOps);
263 // In an AddRec, check if both start and step are divisible.
264 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
265 const SCEV *Step = A->getStepRecurrence(SE);
266 const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
267 if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
269 if (!StepRem->isZero())
271 const SCEV *Start = A->getStart();
272 if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
274 S = SE.getAddRecExpr(Start, Step, A->getLoop(),
275 A->getNoWrapFlags(SCEV::FlagNW));
282 /// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
283 /// is the number of SCEVAddRecExprs present, which are kept at the end of
286 static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
288 ScalarEvolution &SE) {
289 unsigned NumAddRecs = 0;
290 for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
292 // Group Ops into non-addrecs and addrecs.
293 SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
294 SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
295 // Let ScalarEvolution sort and simplify the non-addrecs list.
296 const SCEV *Sum = NoAddRecs.empty() ?
297 SE.getConstant(Ty, 0) :
298 SE.getAddExpr(NoAddRecs);
299 // If it returned an add, use the operands. Otherwise it simplified
300 // the sum into a single value, so just use that.
302 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
303 Ops.append(Add->op_begin(), Add->op_end());
304 else if (!Sum->isZero())
306 // Then append the addrecs.
307 Ops.append(AddRecs.begin(), AddRecs.end());
310 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
311 /// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,d}.
312 /// This helps expose more opportunities for folding parts of the expressions
313 /// into GEP indices.
315 static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
317 ScalarEvolution &SE) {
319 SmallVector<const SCEV *, 8> AddRecs;
320 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
321 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
322 const SCEV *Start = A->getStart();
323 if (Start->isZero()) break;
324 const SCEV *Zero = SE.getConstant(Ty, 0);
325 AddRecs.push_back(SE.getAddRecExpr(Zero,
326 A->getStepRecurrence(SE),
328 A->getNoWrapFlags(SCEV::FlagNW)));
329 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
331 Ops.append(Add->op_begin(), Add->op_end());
332 e += Add->getNumOperands();
337 if (!AddRecs.empty()) {
338 // Add the addrecs onto the end of the list.
339 Ops.append(AddRecs.begin(), AddRecs.end());
340 // Resort the operand list, moving any constants to the front.
341 SimplifyAddOperands(Ops, Ty, SE);
345 /// expandAddToGEP - Expand an addition expression with a pointer type into
346 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
347 /// BasicAliasAnalysis and other passes analyze the result. See the rules
348 /// for getelementptr vs. inttoptr in
349 /// http://llvm.org/docs/LangRef.html#pointeraliasing
352 /// Design note: The correctness of using getelementptr here depends on
353 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
354 /// they may introduce pointer arithmetic which may not be safely converted
355 /// into getelementptr.
357 /// Design note: It might seem desirable for this function to be more
358 /// loop-aware. If some of the indices are loop-invariant while others
359 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
360 /// loop-invariant portions of the overall computation outside the loop.
361 /// However, there are a few reasons this is not done here. Hoisting simple
362 /// arithmetic is a low-level optimization that often isn't very
363 /// important until late in the optimization process. In fact, passes
364 /// like InstructionCombining will combine GEPs, even if it means
365 /// pushing loop-invariant computation down into loops, so even if the
366 /// GEPs were split here, the work would quickly be undone. The
367 /// LoopStrengthReduction pass, which is usually run quite late (and
368 /// after the last InstructionCombining pass), takes care of hoisting
369 /// loop-invariant portions of expressions, after considering what
370 /// can be folded using target addressing modes.
372 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
373 const SCEV *const *op_end,
377 Type *OriginalElTy = PTy->getElementType();
378 Type *ElTy = OriginalElTy;
379 SmallVector<Value *, 4> GepIndices;
380 SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
381 bool AnyNonZeroIndices = false;
383 // Split AddRecs up into parts as either of the parts may be usable
384 // without the other.
385 SplitAddRecs(Ops, Ty, SE);
387 Type *IntPtrTy = DL.getIntPtrType(PTy);
389 // Descend down the pointer's type and attempt to convert the other
390 // operands into GEP indices, at each level. The first index in a GEP
391 // indexes into the array implied by the pointer operand; the rest of
392 // the indices index into the element or field type selected by the
395 // If the scale size is not 0, attempt to factor out a scale for
397 SmallVector<const SCEV *, 8> ScaledOps;
398 if (ElTy->isSized()) {
399 const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
400 if (!ElSize->isZero()) {
401 SmallVector<const SCEV *, 8> NewOps;
402 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
403 const SCEV *Op = Ops[i];
404 const SCEV *Remainder = SE.getConstant(Ty, 0);
405 if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
406 // Op now has ElSize factored out.
407 ScaledOps.push_back(Op);
408 if (!Remainder->isZero())
409 NewOps.push_back(Remainder);
410 AnyNonZeroIndices = true;
412 // The operand was not divisible, so add it to the list of operands
413 // we'll scan next iteration.
414 NewOps.push_back(Ops[i]);
417 // If we made any changes, update Ops.
418 if (!ScaledOps.empty()) {
420 SimplifyAddOperands(Ops, Ty, SE);
425 // Record the scaled array index for this level of the type. If
426 // we didn't find any operands that could be factored, tentatively
427 // assume that element zero was selected (since the zero offset
428 // would obviously be folded away).
429 Value *Scaled = ScaledOps.empty() ?
430 Constant::getNullValue(Ty) :
431 expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
432 GepIndices.push_back(Scaled);
434 // Collect struct field index operands.
435 while (StructType *STy = dyn_cast<StructType>(ElTy)) {
436 bool FoundFieldNo = false;
437 // An empty struct has no fields.
438 if (STy->getNumElements() == 0) break;
439 // Field offsets are known. See if a constant offset falls within any of
440 // the struct fields.
443 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
444 if (SE.getTypeSizeInBits(C->getType()) <= 64) {
445 const StructLayout &SL = *DL.getStructLayout(STy);
446 uint64_t FullOffset = C->getValue()->getZExtValue();
447 if (FullOffset < SL.getSizeInBytes()) {
448 unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
449 GepIndices.push_back(
450 ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
451 ElTy = STy->getTypeAtIndex(ElIdx);
453 SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
454 AnyNonZeroIndices = true;
458 // If no struct field offsets were found, tentatively assume that
459 // field zero was selected (since the zero offset would obviously
462 ElTy = STy->getTypeAtIndex(0u);
463 GepIndices.push_back(
464 Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
468 if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
469 ElTy = ATy->getElementType();
474 // If none of the operands were convertible to proper GEP indices, cast
475 // the base to i8* and do an ugly getelementptr with that. It's still
476 // better than ptrtoint+arithmetic+inttoptr at least.
477 if (!AnyNonZeroIndices) {
478 // Cast the base to i8*.
479 V = InsertNoopCastOfTo(V,
480 Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
482 assert(!isa<Instruction>(V) ||
483 SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
485 // Expand the operands for a plain byte offset.
486 Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
488 // Fold a GEP with constant operands.
489 if (Constant *CLHS = dyn_cast<Constant>(V))
490 if (Constant *CRHS = dyn_cast<Constant>(Idx))
491 return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
494 // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
495 unsigned ScanLimit = 6;
496 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
497 // Scanning starts from the last instruction before the insertion point.
498 BasicBlock::iterator IP = Builder.GetInsertPoint();
499 if (IP != BlockBegin) {
501 for (; ScanLimit; --IP, --ScanLimit) {
502 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
504 if (isa<DbgInfoIntrinsic>(IP))
506 if (IP->getOpcode() == Instruction::GetElementPtr &&
507 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
509 if (IP == BlockBegin) break;
513 // Save the original insertion point so we can restore it when we're done.
514 BuilderType::InsertPointGuard Guard(Builder);
516 // Move the insertion point out of as many loops as we can.
517 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
518 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
519 BasicBlock *Preheader = L->getLoopPreheader();
520 if (!Preheader) break;
522 // Ok, move up a level.
523 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
527 Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
528 rememberInstruction(GEP);
533 // Save the original insertion point so we can restore it when we're done.
534 BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();
536 // Move the insertion point out of as many loops as we can.
537 while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
538 if (!L->isLoopInvariant(V)) break;
540 bool AnyIndexNotLoopInvariant = false;
541 for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
542 E = GepIndices.end(); I != E; ++I)
543 if (!L->isLoopInvariant(*I)) {
544 AnyIndexNotLoopInvariant = true;
547 if (AnyIndexNotLoopInvariant)
550 BasicBlock *Preheader = L->getLoopPreheader();
551 if (!Preheader) break;
553 // Ok, move up a level.
554 Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
557 // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
558 // because ScalarEvolution may have changed the address arithmetic to
559 // compute a value which is beyond the end of the allocated object.
561 if (V->getType() != PTy)
562 Casted = InsertNoopCastOfTo(Casted, PTy);
563 Value *GEP = Builder.CreateGEP(OriginalElTy, Casted,
566 Ops.push_back(SE.getUnknown(GEP));
567 rememberInstruction(GEP);
569 // Restore the original insert point.
570 Builder.restoreIP(SaveInsertPt);
572 return expand(SE.getAddExpr(Ops));
575 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
576 /// SCEV expansion. If they are nested, this is the most nested. If they are
577 /// neighboring, pick the later.
578 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
582 if (A->contains(B)) return B;
583 if (B->contains(A)) return A;
584 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
585 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
586 return A; // Arbitrarily break the tie.
589 /// getRelevantLoop - Get the most relevant loop associated with the given
590 /// expression, according to PickMostRelevantLoop.
591 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
592 // Test whether we've already computed the most relevant loop for this SCEV.
593 std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
594 RelevantLoops.insert(std::make_pair(S, nullptr));
596 return Pair.first->second;
598 if (isa<SCEVConstant>(S))
599 // A constant has no relevant loops.
601 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
602 if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
603 return Pair.first->second = SE.LI->getLoopFor(I->getParent());
604 // A non-instruction has no relevant loops.
607 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
608 const Loop *L = nullptr;
609 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
611 for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
613 L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
614 return RelevantLoops[N] = L;
616 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
617 const Loop *Result = getRelevantLoop(C->getOperand());
618 return RelevantLoops[C] = Result;
620 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
622 PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
623 getRelevantLoop(D->getRHS()),
625 return RelevantLoops[D] = Result;
627 llvm_unreachable("Unexpected SCEV type!");
632 /// LoopCompare - Compare loops by PickMostRelevantLoop.
636 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
638 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
639 std::pair<const Loop *, const SCEV *> RHS) const {
640 // Keep pointer operands sorted at the end.
641 if (LHS.second->getType()->isPointerTy() !=
642 RHS.second->getType()->isPointerTy())
643 return LHS.second->getType()->isPointerTy();
645 // Compare loops with PickMostRelevantLoop.
646 if (LHS.first != RHS.first)
647 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
649 // If one operand is a non-constant negative and the other is not,
650 // put the non-constant negative on the right so that a sub can
651 // be used instead of a negate and add.
652 if (LHS.second->isNonConstantNegative()) {
653 if (!RHS.second->isNonConstantNegative())
655 } else if (RHS.second->isNonConstantNegative())
658 // Otherwise they are equivalent according to this comparison.
665 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
666 Type *Ty = SE.getEffectiveSCEVType(S->getType());
668 // Collect all the add operands in a loop, along with their associated loops.
669 // Iterate in reverse so that constants are emitted last, all else equal, and
670 // so that pointer operands are inserted first, which the code below relies on
671 // to form more involved GEPs.
672 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
673 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
674 E(S->op_begin()); I != E; ++I)
675 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
677 // Sort by loop. Use a stable sort so that constants follow non-constants and
678 // pointer operands precede non-pointer operands.
679 std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
681 // Emit instructions to add all the operands. Hoist as much as possible
682 // out of loops, and form meaningful getelementptrs where possible.
683 Value *Sum = nullptr;
684 for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
685 I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
686 const Loop *CurLoop = I->first;
687 const SCEV *Op = I->second;
689 // This is the first operand. Just expand it.
692 } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
693 // The running sum expression is a pointer. Try to form a getelementptr
694 // at this level with that as the base.
695 SmallVector<const SCEV *, 4> NewOps;
696 for (; I != E && I->first == CurLoop; ++I) {
697 // If the operand is SCEVUnknown and not instructions, peek through
698 // it, to enable more of it to be folded into the GEP.
699 const SCEV *X = I->second;
700 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
701 if (!isa<Instruction>(U->getValue()))
702 X = SE.getSCEV(U->getValue());
705 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
706 } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
707 // The running sum is an integer, and there's a pointer at this level.
708 // Try to form a getelementptr. If the running sum is instructions,
709 // use a SCEVUnknown to avoid re-analyzing them.
710 SmallVector<const SCEV *, 4> NewOps;
711 NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
713 for (++I; I != E && I->first == CurLoop; ++I)
714 NewOps.push_back(I->second);
715 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
716 } else if (Op->isNonConstantNegative()) {
717 // Instead of doing a negate and add, just do a subtract.
718 Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
719 Sum = InsertNoopCastOfTo(Sum, Ty);
720 Sum = InsertBinop(Instruction::Sub, Sum, W);
724 Value *W = expandCodeFor(Op, Ty);
725 Sum = InsertNoopCastOfTo(Sum, Ty);
726 // Canonicalize a constant to the RHS.
727 if (isa<Constant>(Sum)) std::swap(Sum, W);
728 Sum = InsertBinop(Instruction::Add, Sum, W);
736 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
737 Type *Ty = SE.getEffectiveSCEVType(S->getType());
739 // Collect all the mul operands in a loop, along with their associated loops.
740 // Iterate in reverse so that constants are emitted last, all else equal.
741 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
742 for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
743 E(S->op_begin()); I != E; ++I)
744 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
746 // Sort by loop. Use a stable sort so that constants follow non-constants.
747 std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
749 // Emit instructions to mul all the operands. Hoist as much as possible
751 Value *Prod = nullptr;
752 for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
753 I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
754 const SCEV *Op = I->second;
756 // This is the first operand. Just expand it.
759 } else if (Op->isAllOnesValue()) {
760 // Instead of doing a multiply by negative one, just do a negate.
761 Prod = InsertNoopCastOfTo(Prod, Ty);
762 Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
766 Value *W = expandCodeFor(Op, Ty);
767 Prod = InsertNoopCastOfTo(Prod, Ty);
768 // Canonicalize a constant to the RHS.
769 if (isa<Constant>(Prod)) std::swap(Prod, W);
770 Prod = InsertBinop(Instruction::Mul, Prod, W);
778 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
779 Type *Ty = SE.getEffectiveSCEVType(S->getType());
781 Value *LHS = expandCodeFor(S->getLHS(), Ty);
782 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
783 const APInt &RHS = SC->getValue()->getValue();
784 if (RHS.isPowerOf2())
785 return InsertBinop(Instruction::LShr, LHS,
786 ConstantInt::get(Ty, RHS.logBase2()));
789 Value *RHS = expandCodeFor(S->getRHS(), Ty);
790 return InsertBinop(Instruction::UDiv, LHS, RHS);
793 /// Move parts of Base into Rest to leave Base with the minimal
794 /// expression that provides a pointer operand suitable for a
796 static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
797 ScalarEvolution &SE) {
798 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
799 Base = A->getStart();
800 Rest = SE.getAddExpr(Rest,
801 SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
802 A->getStepRecurrence(SE),
804 A->getNoWrapFlags(SCEV::FlagNW)));
806 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
807 Base = A->getOperand(A->getNumOperands()-1);
808 SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
809 NewAddOps.back() = Rest;
810 Rest = SE.getAddExpr(NewAddOps);
811 ExposePointerBase(Base, Rest, SE);
815 /// Determine if this is a well-behaved chain of instructions leading back to
816 /// the PHI. If so, it may be reused by expanded expressions.
817 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
819 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
820 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
822 // If any of the operands don't dominate the insert position, bail.
823 // Addrec operands are always loop-invariant, so this can only happen
824 // if there are instructions which haven't been hoisted.
825 if (L == IVIncInsertLoop) {
826 for (User::op_iterator OI = IncV->op_begin()+1,
827 OE = IncV->op_end(); OI != OE; ++OI)
828 if (Instruction *OInst = dyn_cast<Instruction>(OI))
829 if (!SE.DT->dominates(OInst, IVIncInsertPos))
832 // Advance to the next instruction.
833 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
837 if (IncV->mayHaveSideEffects())
843 return isNormalAddRecExprPHI(PN, IncV, L);
846 /// getIVIncOperand returns an induction variable increment's induction
847 /// variable operand.
849 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
850 /// operands dominate InsertPos.
852 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
853 /// simple patterns generated by getAddRecExprPHILiterally and
854 /// expandAddtoGEP. If the pattern isn't recognized, return NULL.
855 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
856 Instruction *InsertPos,
858 if (IncV == InsertPos)
861 switch (IncV->getOpcode()) {
864 // Check for a simple Add/Sub or GEP of a loop invariant step.
865 case Instruction::Add:
866 case Instruction::Sub: {
867 Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
868 if (!OInst || SE.DT->dominates(OInst, InsertPos))
869 return dyn_cast<Instruction>(IncV->getOperand(0));
872 case Instruction::BitCast:
873 return dyn_cast<Instruction>(IncV->getOperand(0));
874 case Instruction::GetElementPtr:
875 for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
877 if (isa<Constant>(*I))
879 if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
880 if (!SE.DT->dominates(OInst, InsertPos))
884 // allow any kind of GEP as long as it can be hoisted.
887 // This must be a pointer addition of constants (pretty), which is already
888 // handled, or some number of address-size elements (ugly). Ugly geps
889 // have 2 operands. i1* is used by the expander to represent an
890 // address-size element.
891 if (IncV->getNumOperands() != 2)
893 unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
894 if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
895 && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
899 return dyn_cast<Instruction>(IncV->getOperand(0));
903 /// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
904 /// it available to other uses in this loop. Recursively hoist any operands,
905 /// until we reach a value that dominates InsertPos.
906 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
907 if (SE.DT->dominates(IncV, InsertPos))
910 // InsertPos must itself dominate IncV so that IncV's new position satisfies
911 // its existing users.
912 if (isa<PHINode>(InsertPos)
913 || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
916 // Check that the chain of IV operands leading back to Phi can be hoisted.
917 SmallVector<Instruction*, 4> IVIncs;
919 Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
922 // IncV is safe to hoist.
923 IVIncs.push_back(IncV);
925 if (SE.DT->dominates(IncV, InsertPos))
928 for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
929 E = IVIncs.rend(); I != E; ++I) {
930 (*I)->moveBefore(InsertPos);
935 /// Determine if this cyclic phi is in a form that would have been generated by
936 /// LSR. We don't care if the phi was actually expanded in this pass, as long
937 /// as it is in a low-cost form, for example, no implied multiplication. This
938 /// should match any patterns generated by getAddRecExprPHILiterally and
940 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
942 for(Instruction *IVOper = IncV;
943 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
944 /*allowScale=*/false));) {
951 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
952 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
953 /// need to materialize IV increments elsewhere to handle difficult situations.
954 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
955 Type *ExpandTy, Type *IntTy,
958 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
959 if (ExpandTy->isPointerTy()) {
960 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
961 // If the step isn't constant, don't use an implicitly scaled GEP, because
962 // that would require a multiply inside the loop.
963 if (!isa<ConstantInt>(StepV))
964 GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
965 GEPPtrTy->getAddressSpace());
966 const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
967 IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
968 if (IncV->getType() != PN->getType()) {
969 IncV = Builder.CreateBitCast(IncV, PN->getType());
970 rememberInstruction(IncV);
974 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
975 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
976 rememberInstruction(IncV);
981 /// \brief Hoist the addrec instruction chain rooted in the loop phi above the
982 /// position. This routine assumes that this is possible (has been checked).
983 static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
984 Instruction *Pos, PHINode *LoopPhi) {
986 if (DT->dominates(InstToHoist, Pos))
988 // Make sure the increment is where we want it. But don't move it
989 // down past a potential existing post-inc user.
990 InstToHoist->moveBefore(Pos);
992 InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
993 } while (InstToHoist != LoopPhi);
996 /// \brief Check whether we can cheaply express the requested SCEV in terms of
997 /// the available PHI SCEV by truncation and/or invertion of the step.
998 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
999 const SCEVAddRecExpr *Phi,
1000 const SCEVAddRecExpr *Requested,
1002 Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
1003 Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
1005 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
1008 // Try truncate it if necessary.
1009 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
1013 // Check whether truncation will help.
1014 if (Phi == Requested) {
1019 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
1020 if (SE.getAddExpr(Requested->getStart(),
1021 SE.getNegativeSCEV(Requested)) == Phi) {
1029 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1030 if (!isa<IntegerType>(AR->getType()))
1033 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1034 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1035 const SCEV *Step = AR->getStepRecurrence(SE);
1036 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
1037 SE.getSignExtendExpr(AR, WideTy));
1038 const SCEV *ExtendAfterOp =
1039 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1040 return ExtendAfterOp == OpAfterExtend;
1043 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1044 if (!isa<IntegerType>(AR->getType()))
1047 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1048 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1049 const SCEV *Step = AR->getStepRecurrence(SE);
1050 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
1051 SE.getZeroExtendExpr(AR, WideTy));
1052 const SCEV *ExtendAfterOp =
1053 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1054 return ExtendAfterOp == OpAfterExtend;
1057 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
1058 /// the base addrec, which is the addrec without any non-loop-dominating
1059 /// values, and return the PHI.
1061 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1067 assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1069 // Reuse a previously-inserted PHI, if present.
1070 BasicBlock *LatchBlock = L->getLoopLatch();
1072 PHINode *AddRecPhiMatch = nullptr;
1073 Instruction *IncV = nullptr;
1077 // Only try partially matching scevs that need truncation and/or
1078 // step-inversion if we know this loop is outside the current loop.
1079 bool TryNonMatchingSCEV = IVIncInsertLoop &&
1080 SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1082 for (BasicBlock::iterator I = L->getHeader()->begin();
1083 PHINode *PN = dyn_cast<PHINode>(I); ++I) {
1084 if (!SE.isSCEVable(PN->getType()))
1087 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
1091 bool IsMatchingSCEV = PhiSCEV == Normalized;
1092 // We only handle truncation and inversion of phi recurrences for the
1093 // expanded expression if the expanded expression's loop dominates the
1094 // loop we insert to. Check now, so we can bail out early.
1095 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1098 Instruction *TempIncV =
1099 cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
1101 // Check whether we can reuse this PHI node.
1103 if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
1105 if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
1108 if (!isNormalAddRecExprPHI(PN, TempIncV, L))
1112 // Stop if we have found an exact match SCEV.
1113 if (IsMatchingSCEV) {
1117 AddRecPhiMatch = PN;
1121 // Try whether the phi can be translated into the requested form
1122 // (truncated and/or offset by a constant).
1123 if ((!TruncTy || InvertStep) &&
1124 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1125 // Record the phi node. But don't stop we might find an exact match
1127 AddRecPhiMatch = PN;
1129 TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1133 if (AddRecPhiMatch) {
1134 // Potentially, move the increment. We have made sure in
1135 // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
1136 if (L == IVIncInsertLoop)
1137 hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
1139 // Ok, the add recurrence looks usable.
1140 // Remember this PHI, even in post-inc mode.
1141 InsertedValues.insert(AddRecPhiMatch);
1142 // Remember the increment.
1143 rememberInstruction(IncV);
1144 return AddRecPhiMatch;
1148 // Save the original insertion point so we can restore it when we're done.
1149 BuilderType::InsertPointGuard Guard(Builder);
1151 // Another AddRec may need to be recursively expanded below. For example, if
1152 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1153 // loop. Remove this loop from the PostIncLoops set before expanding such
1154 // AddRecs. Otherwise, we cannot find a valid position for the step
1155 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1156 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1157 // so it's not worth implementing SmallPtrSet::swap.
1158 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1159 PostIncLoops.clear();
1161 // Expand code for the start value.
1162 Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
1163 L->getHeader()->begin());
1165 // StartV must be hoisted into L's preheader to dominate the new phi.
1166 assert(!isa<Instruction>(StartV) ||
1167 SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
1170 // Expand code for the step value. Do this before creating the PHI so that PHI
1171 // reuse code doesn't see an incomplete PHI.
1172 const SCEV *Step = Normalized->getStepRecurrence(SE);
1173 // If the stride is negative, insert a sub instead of an add for the increment
1174 // (unless it's a constant, because subtracts of constants are canonicalized
1176 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1178 Step = SE.getNegativeSCEV(Step);
1179 // Expand the step somewhere that dominates the loop header.
1180 Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
1182 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1183 // we actually do emit an addition. It does not apply if we emit a
1185 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1186 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1189 BasicBlock *Header = L->getHeader();
1190 Builder.SetInsertPoint(Header, Header->begin());
1191 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1192 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1193 Twine(IVName) + ".iv");
1194 rememberInstruction(PN);
1196 // Create the step instructions and populate the PHI.
1197 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1198 BasicBlock *Pred = *HPI;
1200 // Add a start value.
1201 if (!L->contains(Pred)) {
1202 PN->addIncoming(StartV, Pred);
1206 // Create a step value and add it to the PHI.
1207 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1208 // instructions at IVIncInsertPos.
1209 Instruction *InsertPos = L == IVIncInsertLoop ?
1210 IVIncInsertPos : Pred->getTerminator();
1211 Builder.SetInsertPoint(InsertPos);
1212 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1214 if (isa<OverflowingBinaryOperator>(IncV)) {
1216 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1218 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1220 PN->addIncoming(IncV, Pred);
1223 // After expanding subexpressions, restore the PostIncLoops set so the caller
1224 // can ensure that IVIncrement dominates the current uses.
1225 PostIncLoops = SavedPostIncLoops;
1227 // Remember this PHI, even in post-inc mode.
1228 InsertedValues.insert(PN);
1233 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1234 Type *STy = S->getType();
1235 Type *IntTy = SE.getEffectiveSCEVType(STy);
1236 const Loop *L = S->getLoop();
1238 // Determine a normalized form of this expression, which is the expression
1239 // before any post-inc adjustment is made.
1240 const SCEVAddRecExpr *Normalized = S;
1241 if (PostIncLoops.count(L)) {
1242 PostIncLoopSet Loops;
1245 cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
1246 nullptr, Loops, SE, *SE.DT));
1249 // Strip off any non-loop-dominating component from the addrec start.
1250 const SCEV *Start = Normalized->getStart();
1251 const SCEV *PostLoopOffset = nullptr;
1252 if (!SE.properlyDominates(Start, L->getHeader())) {
1253 PostLoopOffset = Start;
1254 Start = SE.getConstant(Normalized->getType(), 0);
1255 Normalized = cast<SCEVAddRecExpr>(
1256 SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1257 Normalized->getLoop(),
1258 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1261 // Strip off any non-loop-dominating component from the addrec step.
1262 const SCEV *Step = Normalized->getStepRecurrence(SE);
1263 const SCEV *PostLoopScale = nullptr;
1264 if (!SE.dominates(Step, L->getHeader())) {
1265 PostLoopScale = Step;
1266 Step = SE.getConstant(Normalized->getType(), 1);
1268 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1269 Start, Step, Normalized->getLoop(),
1270 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1273 // Expand the core addrec. If we need post-loop scaling, force it to
1274 // expand to an integer type to avoid the need for additional casting.
1275 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1276 // In some cases, we decide to reuse an existing phi node but need to truncate
1277 // it and/or invert the step.
1278 Type *TruncTy = nullptr;
1279 bool InvertStep = false;
1280 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
1281 TruncTy, InvertStep);
1283 // Accommodate post-inc mode, if necessary.
1285 if (!PostIncLoops.count(L))
1288 // In PostInc mode, use the post-incremented value.
1289 BasicBlock *LatchBlock = L->getLoopLatch();
1290 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1291 Result = PN->getIncomingValueForBlock(LatchBlock);
1293 // For an expansion to use the postinc form, the client must call
1294 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1295 // or dominated by IVIncInsertPos.
1296 if (isa<Instruction>(Result)
1297 && !SE.DT->dominates(cast<Instruction>(Result),
1298 Builder.GetInsertPoint())) {
1299 // The induction variable's postinc expansion does not dominate this use.
1300 // IVUsers tries to prevent this case, so it is rare. However, it can
1301 // happen when an IVUser outside the loop is not dominated by the latch
1302 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1303 // all cases. Consider a phi outide whose operand is replaced during
1304 // expansion with the value of the postinc user. Without fundamentally
1305 // changing the way postinc users are tracked, the only remedy is
1306 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1307 // but hopefully expandCodeFor handles that.
1309 !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1311 Step = SE.getNegativeSCEV(Step);
1314 // Expand the step somewhere that dominates the loop header.
1315 BuilderType::InsertPointGuard Guard(Builder);
1316 StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
1318 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1322 // We have decided to reuse an induction variable of a dominating loop. Apply
1323 // truncation and/or invertion of the step.
1325 Type *ResTy = Result->getType();
1326 // Normalize the result type.
1327 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1328 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1329 // Truncate the result.
1330 if (TruncTy != Result->getType()) {
1331 Result = Builder.CreateTrunc(Result, TruncTy);
1332 rememberInstruction(Result);
1334 // Invert the result.
1336 Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
1338 rememberInstruction(Result);
1342 // Re-apply any non-loop-dominating scale.
1343 if (PostLoopScale) {
1344 assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1345 Result = InsertNoopCastOfTo(Result, IntTy);
1346 Result = Builder.CreateMul(Result,
1347 expandCodeFor(PostLoopScale, IntTy));
1348 rememberInstruction(Result);
1351 // Re-apply any non-loop-dominating offset.
1352 if (PostLoopOffset) {
1353 if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1354 const SCEV *const OffsetArray[1] = { PostLoopOffset };
1355 Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
1357 Result = InsertNoopCastOfTo(Result, IntTy);
1358 Result = Builder.CreateAdd(Result,
1359 expandCodeFor(PostLoopOffset, IntTy));
1360 rememberInstruction(Result);
1367 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1368 if (!CanonicalMode) return expandAddRecExprLiterally(S);
1370 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1371 const Loop *L = S->getLoop();
1373 // First check for an existing canonical IV in a suitable type.
1374 PHINode *CanonicalIV = nullptr;
1375 if (PHINode *PN = L->getCanonicalInductionVariable())
1376 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1379 // Rewrite an AddRec in terms of the canonical induction variable, if
1380 // its type is more narrow.
1382 SE.getTypeSizeInBits(CanonicalIV->getType()) >
1383 SE.getTypeSizeInBits(Ty)) {
1384 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1385 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1386 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1387 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1388 S->getNoWrapFlags(SCEV::FlagNW)));
1389 BasicBlock::iterator NewInsertPt =
1390 std::next(BasicBlock::iterator(cast<Instruction>(V)));
1391 BuilderType::InsertPointGuard Guard(Builder);
1392 while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
1393 isa<LandingPadInst>(NewInsertPt))
1395 V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1400 // {X,+,F} --> X + {0,+,F}
1401 if (!S->getStart()->isZero()) {
1402 SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1403 NewOps[0] = SE.getConstant(Ty, 0);
1404 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1405 S->getNoWrapFlags(SCEV::FlagNW));
1407 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1408 // comments on expandAddToGEP for details.
1409 const SCEV *Base = S->getStart();
1410 const SCEV *RestArray[1] = { Rest };
1411 // Dig into the expression to find the pointer base for a GEP.
1412 ExposePointerBase(Base, RestArray[0], SE);
1413 // If we found a pointer, expand the AddRec with a GEP.
1414 if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1415 // Make sure the Base isn't something exotic, such as a multiplied
1416 // or divided pointer value. In those cases, the result type isn't
1417 // actually a pointer type.
1418 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1419 Value *StartV = expand(Base);
1420 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1421 return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1425 // Just do a normal add. Pre-expand the operands to suppress folding.
1426 return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
1427 SE.getUnknown(expand(Rest))));
1430 // If we don't yet have a canonical IV, create one.
1432 // Create and insert the PHI node for the induction variable in the
1434 BasicBlock *Header = L->getHeader();
1435 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1436 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1438 rememberInstruction(CanonicalIV);
1440 SmallSet<BasicBlock *, 4> PredSeen;
1441 Constant *One = ConstantInt::get(Ty, 1);
1442 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1443 BasicBlock *HP = *HPI;
1444 if (!PredSeen.insert(HP).second) {
1445 // There must be an incoming value for each predecessor, even the
1447 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1451 if (L->contains(HP)) {
1452 // Insert a unit add instruction right before the terminator
1453 // corresponding to the back-edge.
1454 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1456 HP->getTerminator());
1457 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1458 rememberInstruction(Add);
1459 CanonicalIV->addIncoming(Add, HP);
1461 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1466 // {0,+,1} --> Insert a canonical induction variable into the loop!
1467 if (S->isAffine() && S->getOperand(1)->isOne()) {
1468 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1469 "IVs with types different from the canonical IV should "
1470 "already have been handled!");
1474 // {0,+,F} --> {0,+,1} * F
1476 // If this is a simple linear addrec, emit it now as a special case.
1477 if (S->isAffine()) // {0,+,F} --> i*F
1479 expand(SE.getTruncateOrNoop(
1480 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1481 SE.getNoopOrAnyExtend(S->getOperand(1),
1482 CanonicalIV->getType())),
1485 // If this is a chain of recurrences, turn it into a closed form, using the
1486 // folders, then expandCodeFor the closed form. This allows the folders to
1487 // simplify the expression without having to build a bunch of special code
1488 // into this folder.
1489 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1491 // Promote S up to the canonical IV type, if the cast is foldable.
1492 const SCEV *NewS = S;
1493 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1494 if (isa<SCEVAddRecExpr>(Ext))
1497 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1498 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1500 // Truncate the result down to the original type, if needed.
1501 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1505 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1506 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1507 Value *V = expandCodeFor(S->getOperand(),
1508 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1509 Value *I = Builder.CreateTrunc(V, Ty);
1510 rememberInstruction(I);
1514 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1515 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1516 Value *V = expandCodeFor(S->getOperand(),
1517 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1518 Value *I = Builder.CreateZExt(V, Ty);
1519 rememberInstruction(I);
1523 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1524 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1525 Value *V = expandCodeFor(S->getOperand(),
1526 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1527 Value *I = Builder.CreateSExt(V, Ty);
1528 rememberInstruction(I);
1532 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1533 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1534 Type *Ty = LHS->getType();
1535 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1536 // In the case of mixed integer and pointer types, do the
1537 // rest of the comparisons as integer.
1538 if (S->getOperand(i)->getType() != Ty) {
1539 Ty = SE.getEffectiveSCEVType(Ty);
1540 LHS = InsertNoopCastOfTo(LHS, Ty);
1542 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1543 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1544 rememberInstruction(ICmp);
1545 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1546 rememberInstruction(Sel);
1549 // In the case of mixed integer and pointer types, cast the
1550 // final result back to the pointer type.
1551 if (LHS->getType() != S->getType())
1552 LHS = InsertNoopCastOfTo(LHS, S->getType());
1556 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1557 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1558 Type *Ty = LHS->getType();
1559 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1560 // In the case of mixed integer and pointer types, do the
1561 // rest of the comparisons as integer.
1562 if (S->getOperand(i)->getType() != Ty) {
1563 Ty = SE.getEffectiveSCEVType(Ty);
1564 LHS = InsertNoopCastOfTo(LHS, Ty);
1566 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1567 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1568 rememberInstruction(ICmp);
1569 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1570 rememberInstruction(Sel);
1573 // In the case of mixed integer and pointer types, cast the
1574 // final result back to the pointer type.
1575 if (LHS->getType() != S->getType())
1576 LHS = InsertNoopCastOfTo(LHS, S->getType());
1580 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1582 Builder.SetInsertPoint(IP->getParent(), IP);
1583 return expandCodeFor(SH, Ty);
1586 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1587 // Expand the code for this SCEV.
1588 Value *V = expand(SH);
1590 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1591 "non-trivial casts should be done with the SCEVs directly!");
1592 V = InsertNoopCastOfTo(V, Ty);

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = std::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
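
// Illustrative example of the hoisting above: if the SCEV (%n * 4) is
// expanded at a point inside a loop but is invariant in that loop, the
// multiply is emitted in the loop preheader rather than in the loop body.
// The (SCEV, insertion point) cache then lets later expansion requests at
// the same point reuse that instruction instead of emitting a duplicate.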

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
                                           L->getHeader()->begin()));

  return V;
}
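
// Illustrative example: for an i32 request, the expanded {0,+,1}<%loop>
// recurrence takes the familiar canonical-IV shape (names illustrative):
//
//   loop:
//     %indvar      = phi i32 [ 0, %preheader ], [ %indvar.next, %loop ]
//     ...
//     %indvar.next = add i32 %indvar, 1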

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = nullptr;
          if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
            IP = PN->getParent()->getFirstInsertionPt();
          else
            IP = OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
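
// Illustrative example: if a loop header has two phis computing the same
// recurrence, e.g.
//
//   %i = phi i64 [ 0, %ph ], [ %i.next, %latch ]
//   %j = phi i64 [ 0, %ph ], [ %j.next, %latch ]
//
// with identical increments, SCEV assigns both the expression {0,+,1}<%loop>,
// so replaceCongruentIVs rewrites all uses of %j (and of %j.next, the common
// single-increment case) in terms of %i, pushing the dead phi and increment
// onto DeadInsts for later cleanup.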

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not
// IV-derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}
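
// Illustrative example: for a SCEV like (%a /u %b), isSafeToExpand returns
// false because %b is not a provably nonzero constant, so expansion could
// speculate a trapping udiv; (%a /u 4) is accepted. Likewise, a non-affine
// recurrence is rejected when its step does not dominate the loop header.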