//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
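///
/// For example (illustrative summary of the logic below), if a cast of V
/// with the desired opcode and type already exists among V's users but in
/// the wrong position, a replacement cast is emitted at IP and the old
/// cast's uses are redirected to it, rather than leaving two live casts of
/// the same value.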
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
         isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
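///
/// For example, a request for "add %a, %b" immediately below an existing
/// "add %a, %b" returns the existing instruction instead of inserting a
/// duplicate; the backward scan is bounded by a small fixed limit.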
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
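///
/// Illustrative examples: factoring 4 out of the addrec {8,+,12} yields
/// {2,+,3} with a zero remainder, while factoring 4 out of the constant 10
/// yields 2 with a remainder of 2.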
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                         FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
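///
/// For example (illustrative), expanding (%p + 4 * %i) for an i32* base %p
/// can produce "getelementptr i32* %p, %i" rather than a ptrtoint, add, and
/// inttoptr sequence, because the 4-byte element size can be factored out of
/// the byte offset.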
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *F) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
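/// For example, if L1 contains L2, this returns L2; if two sibling loops are
/// given and one's header dominates the other's, the dominated (later) loop
/// is returned.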
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }

  llvm_unreachable("Unexpected SCEV type!");
}
namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (isNonConstantNegative(LHS.second)) {
      if (!isNonConstantNegative(RHS.second))
        return false;
    } else if (isNonConstantNegative(RHS.second))
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (isNonConstantNegative(Op)) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
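  // A udiv by a constant power of two can be emitted as a cheaper logical
  // shift right; for example, (x /u 8) becomes (x >> 3).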
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
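///
/// For example (illustrative), given Base = {p,+,4} and Rest = 0, this
/// leaves Base = p and folds the recurrence into Rest as {0,+,4}.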
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L, Type *ExpandTy) {
  switch (IncV->getOpcode()) {
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub:
    return IncV->getOperand(0) == PN
      && L->isLoopInvariant(IncV->getOperand(1));
  case Instruction::BitCast:
    IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
    if (!IncV)
      return false;
    // fall-thru to GEP handling
  case Instruction::GetElementPtr: {
    // This must be a pointer addition of constants (pretty) or some number of
    // address-size elements (ugly).
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      // ugly geps have 2 operands.
      // i1* is used by the expander to represent an address-size element.
      if (IncV->getNumOperands() != 2)
        return false;
      unsigned AS = cast<PointerType>(ExpandTy)->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return false;
      // Ensure the operands dominate the insertion point. I don't know of a
      // case when this would not be true, so this is somewhat untested.
      if (L == IVIncInsertLoop) {
        for (User::op_iterator OI = IncV->op_begin()+1,
               OE = IncV->op_end(); OI != OE; ++OI)
          if (Instruction *OInst = dyn_cast<Instruction>(OI))
            if (!SE.DT->dominates(OInst, IVIncInsertPos))
              return false;
      }
      break;
    }
    IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    if (IncV && IncV->getOpcode() == Instruction::BitCast)
      IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    return IncV == PN;
  }
  default:
    return false;
  }
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
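///
/// The PHI created here has roughly the shape (illustrative, with "x"
/// standing in for IVName):
///   %x.iv = phi [ %start, %preheader ], [ %x.iv.next, %latch ]
///   %x.iv.next = add %x.iv, %step
/// with a getelementptr-based increment used instead of the add when the
/// recurrence has pointer type.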
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L, ExpandTy))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      if (L == IVIncInsertLoop)
        do {
          if (SE.DT->dominates(IncV, IVIncInsertPos))
            break;
          // Make sure the increment is where we want it. But don't move it
          // down past a potential existing post-inc user.
          IncV->moveBefore(IVIncInsertPos);
          IVIncInsertPos = IncV;
          IncV = cast<Instruction>(IncV->getOperand(0));
        } while (IncV != PN);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Insert instructions right before the
  // terminator corresponding to the back-edge. Do this before creating the PHI
  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
  // negative, insert a sub instead of an add for the increment (unless it's a
  // constant, because subtracts of constants are canonicalized to adds).
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  bool isPointer = ExpandTy->isPointerTy();
  bool isNegative = !isPointer && isNonConstantNegative(Step);
  if (isNegative)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI. If IVIncInsertLoop is
    // non-null and equal to the addrec's loop, insert the instructions
    // at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV;
    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
    if (isPointer) {
      PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
      // If the step isn't constant, don't use an implicitly scaled GEP, because
      // that would require a multiply inside the loop.
      if (!isa<ConstantInt>(StepV))
        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                    GEPPtrTy->getAddressSpace());
      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
      if (IncV->getType() != PN->getType()) {
        IncV = Builder.CreateBitCast(IncV, PN->getType());
        rememberInstruction(IncV);
      }
    } else {
      IncV = isNegative ?
        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
      rememberInstruction(IncV);
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  if (PostIncLoops.empty())
    InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
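/// In IR, the canonical induction variable created here takes the form
/// (illustrative):
///   %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
///   %indvar.next = add i64 %indvar, 1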
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
/// hoistStep - Attempt to hoist an IV increment above a potential use.
///
/// To successfully hoist, two criteria must be met:
/// - IncV operands dominate InsertPos and
/// - InsertPos dominates IncV
///
/// Meeting the second condition means that we don't need to check all of IncV's
/// existing uses (it's moving up in the domtree).
///
/// This does not yet recursively hoist the operands, although that would
/// not be difficult.
///
/// This does not require a SCEVExpander instance and could be replaced by a
/// general code-insertion helper.
bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
                             const DominatorTree *DT) {
  if (DT->dominates(IncV, InsertPos))
    return true;

  if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // Attempt to hoist IncV.
  for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
       OI != OE; ++OI) {
    Instruction *OInst = dyn_cast<Instruction>(OI);
    if (OInst && !DT->dominates(OInst, InsertPos))
      return false;
  }
  IncV->moveBefore(InsertPos);
  return true;
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
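///
/// Two phis are considered congruent here when ScalarEvolution computes the
/// same SCEV expression for both of them.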
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts) {
  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      continue;
    }

    // If one phi derives from the other via GEPs, types may differ.
    // We could consider adding a bitcast here to handle it.
    if (OrigPhiRef->getType() != Phi->getType())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi is more canonical, swap it with the original.
      if (!isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L,
                                   OrigPhiRef->getType())
          && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L, Phi->getType())) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. So it's worth eagerly cleaning up
      // the common case of a single IV increment.
      if (OrigInc != IsomorphicInc &&
          OrigInc->getType() == IsomorphicInc->getType() &&
          SE.getSCEV(OrigInc) == SE.getSCEV(IsomorphicInc) &&
          hoistStep(OrigInc, IsomorphicInc, DT)) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        IsomorphicInc->replaceAllUsesWith(OrigInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Phi->replaceAllUsesWith(OrigPhiRef);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}