//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  if (auto *CPI = dyn_cast<CatchPadInst>(I))
    IP = CPI->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  while (IP->isEHPad()) {
    if (isa<LandingPadInst>(IP) || isa<CleanupPadInst>(IP)) {
      ++IP;
    } else if (auto *TPI = dyn_cast<TerminatePadInst>(IP)) {
      IP = TPI->getUnwindDest()->getFirstNonPHI()->getIterator();
    } else if (auto *CEPI = dyn_cast<CatchEndPadInst>(IP)) {
      IP = CEPI->getUnwindDest()->getFirstNonPHI()->getIterator();
    } else if (auto *CEPI = dyn_cast<CleanupEndPadInst>(IP)) {
      IP = CEPI->getUnwindDest()->getFirstNonPHI()->getIterator();
    } else if (isa<CatchPadInst>(IP)) {
      IP = MustDominate->getFirstInsertionPt();
    } else {
      llvm_unreachable("unexpected eh pad!");
    }
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
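/// For example (illustrative), casting an i8* value to i64 here emits a
/// single ptrtoint that can be shared by every use the expander creates,
/// rather than one ptrtoint per use.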
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
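/// For example (illustrative), a request for "add %a, %b" first folds
/// constant operands, then scans a handful of instructions above the
/// insertion point for an identical add to reuse before creating a new one.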
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
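/// For example (illustrative), factoring 4 out of {8,+,12} yields {2,+,3}
/// with a zero remainder, while factoring 4 out of the constant 6 yields 1
/// with a remainder of 2.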
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                          FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(
            C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
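/// For example (illustrative), expanding (%p + 4*%i) over an i32* base as
///   %scevgep = getelementptr i32, i32* %p, i64 %i
/// keeps the pointer provenance visible to alias analysis, whereas an
/// equivalent ptrtoint/add/inttoptr sequence would obscure it.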
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant =
        std::any_of(GepIndices.begin(), GepIndices.end(),
                    [L](Value *Op) { return !L->isLoopInvariant(Op); });

    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
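/// For example (illustrative), given an outer loop L1 containing an inner
/// loop L2, this returns L2; for two sibling loops where L1's header
/// dominates L2's header, it returns L2, the later one.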
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (const auto &I : OpsAndLoops) {
    const SCEV *Op = I.second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}
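
// A note on the udiv expansion below (illustrative): a udiv whose right-hand
// side is a constant power of two is emitted as a logical shift right, e.g.
// (%n /u 8) becomes "lshr i64 %n, 3"; all other cases emit an actual udiv.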
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
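/// For example (illustrative), given Base = {%p,+,4} this peels the addrec
/// so that Base becomes %p and Rest absorbs the remaining {0,+,4} offset.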
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
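/// For example (illustrative), given the increment
///   %iv.next = add i64 %iv, 4
/// this returns %iv; for a GEP increment it returns the GEP's pointer
/// operand when the non-IV operands satisfy the rules above.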
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
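/// For a pointer IV this emits a GEP over the element type, e.g.
/// (illustrative) "%scevgep = getelementptr i32, i32* %iv, i64 %step";
/// for an integer IV it emits a plain add (or sub for a negated step),
/// e.g. "%x.iv.next = add i64 %x.iv, %step".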
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
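/// For example (illustrative), a requested i32 {0,+,1} can be obtained by
/// truncating an available i64 {0,+,1} phi, and {R,+,-1} can be rewritten
/// as R - {0,+,1}, i.e. by inverting the step and subtracting from the start.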
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;
  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;
  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }
  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }
  return false;
}
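
// IsIncrementNSW/IsIncrementNUW check whether the IV increment AR + Step can
// be marked no-signed-wrap / no-unsigned-wrap: the add does not wrap exactly
// when performing it in a type twice as wide and then extending gives the
// same result as extending first, i.e. ext(AR + Step) == ext(AR) + ext(Step).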
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
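/// For example (illustrative), for {%start,+,%step}<%loop> this finds or
/// creates a loop-header phi such as
///   %x.iv = phi i64 [ %start, %preheader ], [ %x.iv.next, %latch ]
/// reusing an existing phi whose SCEV matches the normalized addrec when
/// one is available.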
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (auto &I : *L->getHeader()) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN || !SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop we might find an exact match
        // SCEV later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV =
      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // by SCEV into adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
        Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        BuilderType::InsertPointGuard Guard(Builder);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      &*NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
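  // For instance (illustrative): a value that is invariant across the whole
  // nest ends up in the outermost preheader, while a recurrence over some
  // loop L is typically emitted at the top of L's header, where it dominates
  // every user inside L.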
  Instruction *InsertPt = &*Builder.GetInsertPoint();
  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = &*std::next(InsertPt->getIterator());
      }
      break;
    }
  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
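///
/// For example (an illustrative sketch), requesting an i64 canonical IV in
/// loop %loop produces IR of the form:
///   %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %loop ]
///   %indvar.next = add i64 %indvar, 1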
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V =
      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));

  return V;
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
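///
/// Two phis are congruent when ScalarEvolution assigns them the same SCEV;
/// e.g. (illustrative) both of the following are {0,+,1}<%loop>, so one can
/// be replaced by the other:
///   %i = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
///   %j = phi i64 [ 0, %preheader ], [ %j.next, %latch ]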
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Find integer phis in order of decreasing width.
  SmallVector<PHINode*, 8> Phis;
  for (auto &I : *L->getHeader()) {
    if (auto *PN = dyn_cast<PHINode>(&I))
      Phis.push_back(PN);
    else
      break;
  }

  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });
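  // Integer phis are thus visited widest-first (illustrative: i64 before i32
  // before i8), so a wide phi is registered before any narrower phi that
  // might reuse its truncation.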
  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };
    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }
    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
            SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }
    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;
    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
          cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = nullptr;
          if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
            IP = &*PN->getParent()->getFirstInsertionPt();
          else
            IP = OrigInc->getNextNode();

          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.CreateTruncOrBitCast(OrigInc,
                                                IsomorphicInc->getType(),
                                                IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.emplace_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}
Value *SCEVExpander::findExistingExpansion(const SCEV *S,
                                           const Instruction *At, Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
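  // For example (illustrative), an exiting block ending in
  //   br i1 (icmp ult i64 %iv.next, %n), label %loop, label %exit
  // lets us hand back %iv.next (or %n, if it is an instruction) when its
  // SCEV equals S and it dominates At.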
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    TrueBB, FalseBB)))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return LHS;

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return RHS;
  }

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return nullptr;
}
bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, const Instruction *At,
    SmallPtrSetImpl<const SCEV *> &Processed) {

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (At && findExistingExpansion(S, At, L) != nullptr)
    return false;

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  }

  if (!Processed.insert(S).second)
    return false;
  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs in
    // the user code since it can be lowered into a right shift.
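    // For instance (illustrative), an i64 "%x /u 8" on a target with legal
    // 64-bit integers lowers to "lshr i64 %x, 3" and is therefore cheap.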
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getValue()->getValue().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }
    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // expand.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;
    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to look up 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
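    // (Illustrative: a backedge-taken count is typically "trip count - 1",
    // so 'S + 1' often matches a value like %n that the user's code already
    // computes.)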
    if (!At)
      At = &ExitingBB->back();
    if (!findExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
      return true;
  }
  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;
  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (auto *Op : NAry->operands())
      if (isHighCostExpansionHelper(Op, L, At, Processed))
        return true;
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}
Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  }
  llvm_unreachable("Unknown SCEV predicate type");
}
Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);

  Builder.SetInsertPoint(IP);
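  // Note the inverted sense: the "ident.check" emitted below is true at
  // runtime exactly when the equality predicate does *not* hold, so a caller
  // would branch to its fallback path when the check fires.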
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);
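  // The union check is the bitwise OR of its members' checks: Check starts
  // out false and the loop below sets it to true if any individual
  // predicate's check fires.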
  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
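//
// For example (illustrative), "%a /u 4" is considered safe under this rule,
// while "%a /u %n" is rejected even if %n could be proven nonzero with more
// analysis.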
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}
namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}