//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
/// InsertCastOfTo - Insert a cast of V to the specified type, doing what
/// we can to share the casts.
Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
                                    const Type *Ty) {
  // Short-circuit unnecessary bitcasts.
  if (opcode == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((opcode == Instruction::PtrToInt || opcode == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // FIXME: keep track of the cast instruction.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, C, Ty);
  if (Argument *A = dyn_cast<Argument>(V)) {
    // Check to see if there is already a cast!
    for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
         UI != E; ++UI)
      if ((*UI)->getType() == Ty)
        if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
          if (CI->getOpcode() == opcode) {
            // If the cast isn't the first instruction of the function, move it.
            if (BasicBlock::iterator(CI) !=
                A->getParent()->getEntryBlock().begin()) {
              // If the CastInst is the insert point, change the insert point.
              if (CI == InsertPt) ++InsertPt;
              // Splice the cast at the beginning of the entry block.
              CI->moveBefore(A->getParent()->getEntryBlock().begin());
            }
            return CI;
          }

    Instruction *I = CastInst::Create(opcode, V, Ty, V->getName(),
                                      A->getParent()->getEntryBlock().begin());
    InsertedValues.insert(I);
    return I;
  }
  Instruction *I = cast<Instruction>(V);

  // Check to see if there is already a cast.  If there is, use it.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI)
    if ((*UI)->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
        if (CI->getOpcode() == opcode) {
          BasicBlock::iterator It = I; ++It;
          if (isa<InvokeInst>(I))
            It = cast<InvokeInst>(I)->getNormalDest()->begin();
          while (isa<PHINode>(It)) ++It;
          if (It != BasicBlock::iterator(CI)) {
            // If the CastInst is the insert point, change the insert point.
            if (CI == InsertPt) ++InsertPt;
            // Splice the cast immediately after the operand in question.
            CI->moveBefore(It);
          }
          return CI;
        }

  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP)) ++IP;
  Instruction *CI = CastInst::Create(opcode, V, Ty, V->getName(), IP);
  InsertedValues.insert(CI);
  return CI;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");
  return InsertCastOfTo(Op, V, Ty);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, BasicBlock::iterator InsertPt) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
  if (InsertPt != BlockBegin) {
    // Scanning starts from the last instruction before InsertPt.
    BasicBlock::iterator IP = InsertPt;
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = BinaryOperator::Create(Opcode, LHS, RHS, "tmp", InsertPt);
  InsertedValues.insert(BO);
  return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
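///
/// For illustration (this example is not from the original comment): with
/// Factor == 4, the expression {8,+,12}<L> factors to {2,+,3}<L> with a zero
/// Remainder, while {8,+,6}<L> is rejected because its step is not evenly
/// divisible by 4.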
static bool FactorOutConstant(SCEVHandle &S,
                              SCEVHandle &Remainder,
                              const APInt &Factor,
                              ScalarEvolution &SE) {
  // Everything is divisible by one.
  if (Factor == 1)
    return true;

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    ConstantInt *CI =
      ConstantInt::get(C->getValue()->getValue().sdiv(Factor));
    // If the quotient is zero and the remainder is non-zero, reject
    // the value at this scale. It will be considered for subsequent
    // smaller scales.
    if (C->isZero() || !CI->isZero()) {
      SCEVHandle Div = SE.getConstant(CI);
      S = Div;
      Remainder =
        SE.getAddExpr(Remainder,
                      SE.getConstant(C->getValue()->getValue().srem(Factor)));
      return true;
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getValue()->getValue().srem(Factor)) {
        std::vector<SCEVHandle> NewMulOps(M->getOperands());
        NewMulOps[0] =
          SE.getConstant(C->getValue()->getValue().sdiv(Factor));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    SCEVHandle Step = A->getStepRecurrence(SE);
    SCEVHandle StepRem = SE.getIntegerSCEV(0, Step->getType());
    if (!FactorOutConstant(Step, StepRem, Factor, SE))
      return false;
    if (!StepRem->isZero())
      return false;
    SCEVHandle Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop());
    return true;
  }

  return false;
}
/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
/// instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis analyze the result. However, it suffers from the
/// underlying bug described in PR2831. Addition in LLVM currently always
/// has two's complement wrapping guaranteed. However, the semantics for
/// getelementptr overflow are ambiguous. In the common case though, this
/// expansion gets used when a GEP in the original code has been converted
/// into integer arithmetic, in which case the resulting code will be no
/// more undefined than it was originally.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
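///
/// Illustrative example (not part of the original comment): with a base
/// pointer %p of type [100 x i32]* and a byte-offset expression {0,+,4}<L>,
/// the i32 element size (4) factors out of the offset, so the expansion can
/// emit
///   getelementptr [100 x i32]* %p, i64 0, i64 %iv
/// (where %iv is the expansion of {0,+,1}<L>) instead of
/// ptrtoint+add+inttoptr arithmetic.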
Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
                                    const SCEVHandle *op_end,
                                    const PointerType *PTy,
                                    const Type *Ty,
                                    Value *V) {
  const Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  std::vector<SCEVHandle> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
                         ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
    std::vector<SCEVHandle> NewOps;
    std::vector<SCEVHandle> ScaledOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      // Split AddRecs up into parts as either of the parts may be usable
      // without the other.
      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i]))
        if (!A->getStart()->isZero()) {
          SCEVHandle Start = A->getStart();
          Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
                                         A->getStepRecurrence(SE),
                                         A->getLoop()));
          Ops[i] = Start;
          ++e;
        }
      // If the scale size is not 0, attempt to factor out a scale.
      if (ElSize != 0) {
        SCEVHandle Op = Ops[i];
        SCEVHandle Remainder = SE.getIntegerSCEV(0, Op->getType());
        if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
          ScaledOps.push_back(Op); // Op now has ElSize factored out.
          NewOps.push_back(Remainder);
          continue;
        }
      }
      // If the operand was not divisible, add it to the list of operands
      // we'll scan next iteration.
      NewOps.push_back(Ops[i]);
    }
    Ops = NewOps;
    AnyNonZeroIndices |= !ScaledOps.empty();
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);
    // Collect struct field index operands.
    if (!Ops.empty())
      while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(ConstantInt::get(Type::Int32Ty, ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(ConstantInt::get(Ty,
                                                FullOffset -
                                                SL.getElementOffset(ElIdx)));
              AnyNonZeroIndices = true;
              continue;
            }
          }
        break;
      }

    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) {
      ElTy = ATy->getElementType();
      continue;
    }
    break;
  }
  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    V = InsertNoopCastOfTo(V,
                           Type::Int8Ty->getPointerTo(PTy->getAddressSpace()));
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
    if (InsertPt != BlockBegin) {
      // Scanning starts from the last instruction before InsertPt.
      BasicBlock::iterator IP = InsertPt;
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    Value *GEP = GetElementPtrInst::Create(V, Idx, "scevgep", InsertPt);
    InsertedValues.insert(GEP);
    return GEP;
  }

  // Insert a pretty getelementptr.
  Value *GEP = GetElementPtrInst::Create(V,
                                         GepIndices.begin(),
                                         GepIndices.end(),
                                         "scevgep", InsertPt);
  Ops.push_back(SE.getUnknown(GEP));
  InsertedValues.insert(GEP);
  return expand(SE.getAddExpr(Ops));
}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expand(S->getOperand(S->getNumOperands()-1));

  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
  // comments on expandAddToGEP for details.
  if (SE.TD)
    if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
      const std::vector<SCEVHandle> &Ops = S->getOperands();
      return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1],
                            PTy, Ty, V);
    }

  V = InsertNoopCastOfTo(V, Ty);

  // Emit a bunch of add instructions.
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    Value *W = expandCodeFor(S->getOperand(i), Ty);
    V = InsertBinop(Instruction::Add, V, W, InsertPt);
  }
  return V;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  int FirstOp = 0;  // Set if we should emit a subtract.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
    if (SC->getValue()->isAllOnesValue())
      FirstOp = 1;

  int i = S->getNumOperands()-2;
  Value *V = expandCodeFor(S->getOperand(i+1), Ty);

  // Emit a bunch of multiply instructions.
  for (; i >= FirstOp; --i) {
    Value *W = expandCodeFor(S->getOperand(i), Ty);
    V = InsertBinop(Instruction::Mul, V, W, InsertPt);
  }

  // -1 * ...  --->  0 - ...
  if (FirstOp == 1)
    V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V, InsertPt);
  return V;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         InsertPt);
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS, InsertPt);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
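///
/// For illustration (this example is not from the original comment): given
/// Base == {(4 + %p),+,8}<L> and Rest == 0, the recurrence and the add are
/// peeled off so that Base becomes %p and Rest becomes (4 + {0,+,8}<L>).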
static void ExposePointerBase(SCEVHandle &Base, SCEVHandle &Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
                                          A->getStepRecurrence(SE),
                                          A->getLoop()));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    std::vector<SCEVHandle> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.isSCEVable(PN->getType()) &&
        isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) &&
        SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SCEVHandle Start = SE.getAnyExtendExpr(S->getStart(),
                                           CanonicalIV->getType());
    SCEVHandle Step = SE.getAnyExtendExpr(S->getStepRecurrence(SE),
                                          CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(Start, Step, S->getLoop()));
    BasicBlock::iterator SaveInsertPt = getInsertionPoint();
    BasicBlock::iterator NewInsertPt =
      next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    setInsertionPoint(SaveInsertPt);
    return V;
  }
  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    std::vector<SCEVHandle> NewOps(S->getOperands());
    NewOps[0] = SE.getIntegerSCEV(0, Ty);
    SCEVHandle Rest = SE.getAddRecExpr(NewOps, L);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    if (SE.TD) {
      SCEVHandle Base = S->getStart();
      SCEVHandle RestArray[1] = { Rest };
      // Dig into the expression to find the pointer base for a GEP.
      ExposePointerBase(Base, RestArray[0], SE);
      // If we found a pointer, expand the AddRec with a GEP.
      if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
        // Make sure the Base isn't something exotic, such as a multiplied
        // or divided pointer value. In those cases, the result type isn't
        // actually a pointer type.
        if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
          Value *StartV = expand(Base);
          assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
          return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
        }
      }
    }

    Value *RestV = expand(Rest);
    return expand(SE.getAddExpr(S->getStart(), SE.getUnknown(RestV)));
  }
  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() &&
      S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
    // If there's a canonical IV, just use it.
    if (CanonicalIV) {
      assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
             "IVs with types different from the canonical IV should "
             "already have been handled!");
      return CanonicalIV;
    }

    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
    InsertedValues.insert(PN);
    PN->addIncoming(Constant::getNullValue(Ty), L->getLoopPreheader());

    pred_iterator HPI = pred_begin(Header);
    assert(HPI != pred_end(Header) && "Loop with zero preds???");
    if (!L->contains(*HPI)) ++HPI;
    assert(HPI != pred_end(Header) && L->contains(*HPI) &&
           "No backedge in loop?");

    // Insert a unit add instruction right before the terminator corresponding
    // to the back-edge.
    Constant *One = ConstantInt::get(Ty, 1);
    Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
                                                 (*HPI)->getTerminator());
    InsertedValues.insert(Add);

    pred_iterator PI = pred_begin(Header);
    if (*PI == L->getLoopPreheader())
      ++PI;
    PN->addIncoming(Add, *PI);
    return PN;
  }
  // {0,+,F} --> {0,+,1} * F
  // Get the canonical induction variable I for this loop.
  Value *I = CanonicalIV ?
             CanonicalIV :
             getOrInsertCanonicalInductionVariable(L, Ty);

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) {   // {0,+,F} --> i*F
    Value *F = expandCodeFor(S->getOperand(1), Ty);

    // If the insert point is directly inside of the loop, emit the multiply at
    // the insert point.  Otherwise, L is a loop that is a parent of the insert
    // point loop.  If we can, move the multiply to the outermost loop that it
    // is safe to be in.
    BasicBlock::iterator MulInsertPt = getInsertionPoint();
    Loop *InsertPtLoop = SE.LI->getLoopFor(MulInsertPt->getParent());
    if (InsertPtLoop != L && InsertPtLoop &&
        L->contains(InsertPtLoop->getHeader())) {
      do {
        // If we cannot hoist the multiply out of this loop, don't.
        if (!InsertPtLoop->isLoopInvariant(F)) break;

        BasicBlock *InsertPtLoopPH = InsertPtLoop->getLoopPreheader();

        // If this loop hasn't got a preheader, we aren't able to hoist the
        // multiply.
        if (!InsertPtLoopPH)
          break;

        // Otherwise, move the insert point to the preheader.
        MulInsertPt = InsertPtLoopPH->getTerminator();
        InsertPtLoop = InsertPtLoop->getParentLoop();
      } while (InsertPtLoop != L);
    }

    return InsertBinop(Instruction::Mul, I, F, MulInsertPt);
  }
  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  SCEVHandle IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  SCEVHandle NewS = S;
  SCEVHandle Ext = SE.getNoopOrAnyExtend(S, I->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  SCEVHandle V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  SCEVHandle T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new TruncInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new ZExtInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new SExtInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *LHS = expandCodeFor(S->getOperand(0), Ty);
  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Instruction *ICmp =
      new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS, "tmp", InsertPt);
    InsertedValues.insert(ICmp);
    Instruction *Sel = SelectInst::Create(ICmp, LHS, RHS, "smax", InsertPt);
    InsertedValues.insert(Sel);
    LHS = Sel;
  }
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *LHS = expandCodeFor(S->getOperand(0), Ty);
  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Instruction *ICmp =
      new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS, "tmp", InsertPt);
    InsertedValues.insert(ICmp);
    Instruction *Sel = SelectInst::Create(ICmp, LHS, RHS, "umax", InsertPt);
    InsertedValues.insert(Sel);
    LHS = Sel;
  }
  return LHS;
}
Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Check to see if we already expanded this.
  std::map<SCEVHandle, AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(S);
  if (I != InsertedExpressions.end())
    return I->second;

  Value *V = visit(S);
  InsertedExpressions[S] = V;
  return V;
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none).  A canonical induction variable
/// starts at zero and steps by one on each iteration.
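///
/// For illustration (this IR sketch is not from the original comment), the
/// IV that visitAddRecExpr materializes for {0,+,1} looks like:
///   header:
///     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %backedge ]
///   backedge:
///     %indvar.next = add i64 %indvar, 1
/// where %preheader and %backedge are placeholder block names.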
Value *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    const Type *Ty) {
  assert(Ty->isInteger() && "Can only insert integer induction variables!");
  SCEVHandle H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
                                  SE.getIntegerSCEV(1, Ty), L);
  return expand(H);
}