//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
/// TODO: Perhaps calls like memcpy, strcpy, etc?
bool llvm::callIsSmall(const Function *F) {
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
                                    const TargetData *TD) {
  ++NumBlocks;
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isa<PHINode>(II)) continue;  // PHI nodes don't count.

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      if (const IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(II)) {
        switch (IntrinsicI->getIntrinsicID()) {
        default: break;
        case Intrinsic::dbg_declare:
        case Intrinsic::dbg_value:
        case Intrinsic::invariant_start:
        case Intrinsic::invariant_end:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
        case Intrinsic::objectsize:
        case Intrinsic::ptr_annotation:
        case Intrinsic::var_annotation:
          // These intrinsics don't count as size.
          continue;
        }
      }

      ImmutableCallSite CS(cast<Instruction>(II));

      if (const Function *F = CS.getCalledFunction()) {
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
          ++NumInlineCandidates;

        // If this call is to the function itself, then the function is
        // recursive. Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;
      }

      if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();

        // We don't want inline asm to count as a call - that would prevent
        // loop unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
      // No-op casts, including ptr <-> int, don't count.
      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
          isa<PtrToIntInst>(CI))
        continue;
      // A trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (isa<TruncInst>(CI) && TD &&
          TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
        continue;
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // no-ops on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        continue;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)) {
      // If a GEP has all constant indices, it will probably be folded with
      // a load/store.
      if (GEPI->hasAllConstantIndices())
        continue;
    }

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers,
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // an indirectbr as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
  if (isa<IndirectBrInst>(BB->getTerminator()))
    containsIndirectBr = true;

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
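
/// countCodeReductionForConstant - Estimate the number of instructions that
/// would fold away if V were replaced by a constant: terminators on V keep
/// only one successor, and users whose remaining operands are all constant
/// disappear in turn.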
unsigned InlineCostAnalyzer::FunctionInfo::countCodeReductionForConstant(
    const CodeMetrics &Metrics, Value *V) {
  unsigned Reduction = 0;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *V = Worklist.pop_back_val();
    for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
         ++UI) {
      User *U = *UI;
      if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
        // We will be able to eliminate all but one of the successors.
        const TerminatorInst &TI = cast<TerminatorInst>(*U);
        const unsigned NumSucc = TI.getNumSuccessors();
        unsigned Instrs = 0;
        for (unsigned I = 0; I != NumSucc; ++I)
          Instrs += Metrics.NumBBInsts.lookup(TI.getSuccessor(I));
        // We don't know which blocks will be eliminated, so use the average
        // size.
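        // For example, a two-way branch whose successors hold 10 and 30
        // instructions is credited with (10+30)*InstrCost*(2-1)/2, the mean
        // size of the blocks we expect to delete.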
        Reduction += InlineConstants::InstrCost * Instrs * (NumSucc-1)/NumSucc;
        continue;
      }

      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(*U);

      // We can't constant propagate instructions which have effects or
      // read memory.
      //
      // FIXME: It would be nice to capture the fact that a load from a
      // pointer-to-constant-global is actually a *really* good thing to zap.
      // Unfortunately, we don't know the pointer that may get propagated here,
      // so we can't make this decision.
      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
          isa<AllocaInst>(Inst))
        continue;

      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }
      if (!AllOperandsConstant)
        continue;

      // We will get to remove this instruction...
      Reduction += InlineConstants::InstrCost;

      // ...and any other instructions that use it which become constants
      // themselves.
      Worklist.push_back(&Inst);
    }
  } while (!Worklist.empty());

  return Reduction;
}
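
/// countCodeReductionForAllocaICmp - Estimate the instructions that fold away
/// once an icmp of an alloca against a constant is known to evaluate to a
/// fixed boolean, following the and/or/xor chains and branches that become
/// dead as a result.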
static unsigned countCodeReductionForAllocaICmp(const CodeMetrics &Metrics,
                                                ICmpInst *ICI) {
  unsigned Reduction = 0;

  // Bail if this is comparing against a non-constant; there is nothing we can
  // do there.
  if (!isa<Constant>(ICI->getOperand(1)))
    return Reduction;

  // An icmp pred (alloca, C) becomes true if the predicate is true when
  // equal and false otherwise.
  bool Result = ICI->isTrueWhenEqual();

  SmallVector<Instruction *, 4> Worklist;
  Worklist.push_back(ICI);
  do {
    Instruction *U = Worklist.pop_back_val();
    Reduction += InlineConstants::InstrCost;
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI) {
      Instruction *I = dyn_cast<Instruction>(*UI);
      if (!I || I->mayHaveSideEffects()) continue;
      if (I->getNumOperands() == 1)
        Worklist.push_back(I);
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
        // If BO produces the same value as U, then the other operand is
        // irrelevant and we can put it into the Worklist to continue
        // deleting dead instructions. If BO produces the same value as the
        // other operand, we can delete BO but that's it.
        if (Result) {
          if (BO->getOpcode() == Instruction::Or)
            Worklist.push_back(I);
          if (BO->getOpcode() == Instruction::And)
            Reduction += InlineConstants::InstrCost;
        } else {
          if (BO->getOpcode() == Instruction::Or ||
              BO->getOpcode() == Instruction::Xor)
            Reduction += InlineConstants::InstrCost;
          if (BO->getOpcode() == Instruction::And)
            Worklist.push_back(I);
        }
      }
      if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
        BasicBlock *BB = BI->getSuccessor(Result ? 0 : 1);
        if (BB->getSinglePredecessor())
          Reduction
            += InlineConstants::InstrCost * Metrics.NumBBInsts.lookup(BB);
      }
    }
  } while (!Worklist.empty());

  return Reduction;
}

/// \brief Compute the reduction possible for a given instruction if we are
/// able to SROA an alloca.
///
/// The reduction for this instruction is added to the SROAReduction output
/// parameter. Returns false if this instruction is expected to defeat SROA in
/// general.
static bool countCodeReductionForSROAInst(Instruction *I,
                                          SmallVectorImpl<Value *> &Worklist,
                                          unsigned &SROAReduction) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isSimple())
      return false;
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isSimple())
      return false;
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // If the GEP has variable indices, we won't be able to do much with it.
    if (!GEP->hasAllConstantIndices())
      return false;
    // A non-zero GEP will likely become a mask operation after SROA.
    if (GEP->hasAllZeroIndices())
      SROAReduction += InlineConstants::InstrCost;
    Worklist.push_back(GEP);
    return true;
  }

  if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
    // Track pointer through bitcasts.
    Worklist.push_back(BCI);
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  // We just look for non-constant operands to ICmp instructions as those will
  // defeat SROA. The actual reduction for these happens even without SROA.
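  // (That reduction is credited by countCodeReductionForAllocaICmp; here we
  // only ask whether the icmp blocks SROA.)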
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
    return isa<Constant>(ICI->getOperand(1));

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // SROA can handle a select of alloca iff all uses of the alloca are
    // loads, and dereferenceable. We assume it's dereferenceable since
    // we're told the input is an alloca.
    for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
         UI != UE; ++UI) {
      LoadInst *LI = dyn_cast<LoadInst>(*UI);
      if (LI == 0 || !LI->isSimple())
        return false;
    }
    // We don't know whether we'll be deleting the rest of the chain of
    // instructions from the SelectInst on, because we don't know whether
    // the other side of the select is also an alloca or not.
    return true;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // SROA can usually chew through these intrinsics.
      SROAReduction += InlineConstants::InstrCost;
      return true;
    }
  }

  // If there is some other strange instruction, we're not going to be
  // able to do much if we inline this.
  return false;
}
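
/// countCodeReductionForAlloca - Estimate the code-size reduction available
/// if the value V reaching this argument is an alloca, combining icmp folding
/// with the savings from fully SROA'ing the alloca.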
unsigned InlineCostAnalyzer::FunctionInfo::countCodeReductionForAlloca(
    const CodeMetrics &Metrics, Value *V) {
  if (!V->getType()->isPointerTy()) return 0;  // Not a pointer
  unsigned Reduction = 0;
  unsigned SROAReduction = 0;
  bool CanSROAAlloca = true;

  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *V = Worklist.pop_back_val();
    for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
         UI != E; ++UI) {
      Instruction *I = cast<Instruction>(*UI);

      if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
        Reduction += countCodeReductionForAllocaICmp(Metrics, ICI);

      if (CanSROAAlloca)
        CanSROAAlloca = countCodeReductionForSROAInst(I, Worklist,
                                                      SROAReduction);
    }
  } while (!Worklist.empty());

  return Reduction + (CanSROAAlloca ? SROAReduction : 0);
}
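
/// countCodeReductionForPointerPair - Walk the uses of pointer argument V,
/// recording in PointerArgPairWeights the size reduction available when a
/// pair of pointer arguments differs by a constant in-bounds offset, since
/// icmp and sub of such pointers fold to constants.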
void InlineCostAnalyzer::FunctionInfo::countCodeReductionForPointerPair(
    const CodeMetrics &Metrics, DenseMap<Value *, unsigned> &PointerArgs,
    Value *V, unsigned ArgIdx) {
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *V = Worklist.pop_back_val();
    for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
         UI != E; ++UI) {
      Instruction *I = cast<Instruction>(*UI);

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has variable indices, we won't be able to do much with
        // it.
        if (!GEP->hasAllConstantIndices())
          continue;
        // Unless the GEP is in-bounds, some comparisons will be non-constant.
        // Fortunately, the real-world cases where this occurs use in-bounds
        // GEPs, and so we restrict the optimization to them here.
        if (!GEP->isInBounds())
          continue;

        // Constant indices just change the constant offset. Add the resulting
        // value both to our worklist for this argument, and to the set of
        // viable paired values with future arguments.
        PointerArgs[GEP] = ArgIdx;
        Worklist.push_back(GEP);
        continue;
      }

      // Track pointer through casts. Even when the result is not a pointer,
      // it remains a constant relative to constants derived from other
      // constant pointers.
      if (CastInst *CI = dyn_cast<CastInst>(I)) {
        PointerArgs[CI] = ArgIdx;
        Worklist.push_back(CI);
        continue;
      }

      // There are two instructions which produce a strict constant value when
      // applied to two related pointer values: icmp and sub. Ignore everything
      // else.
      if (!isa<ICmpInst>(I) && I->getOpcode() != Instruction::Sub)
        continue;
      assert(I->getNumOperands() == 2);

      // Ensure that the two operands are in our set of potentially paired
      // pointers (or are derived from them).
      Value *OtherArg = I->getOperand(0);
      if (OtherArg == V)
        OtherArg = I->getOperand(1);
      DenseMap<Value *, unsigned>::const_iterator ArgIt
        = PointerArgs.find(OtherArg);
      if (ArgIt == PointerArgs.end())
        continue;
      std::pair<unsigned, unsigned> ArgPair(ArgIt->second, ArgIdx);
      if (ArgPair.first > ArgPair.second)
        std::swap(ArgPair.first, ArgPair.second);

      PointerArgPairWeights[ArgPair]
        += countCodeReductionForConstant(Metrics, I);
    }
  } while (!Worklist.empty());
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
  // If this function contains a call that "returns twice" (e.g., setjmp or
  // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
  // This is a hack because we depend on the user marking their local variables
  // as volatile if they are live across a setjmp call, and they probably
  // won't do this in callers.
  exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
    !F->hasFnAttr(Attribute::ReturnsTwice);

  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB, TD);
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F,
                                                       const TargetData *TD) {
  Metrics.analyzeFunction(F, TD);

  // A function with exactly one return has it removed during the inlining
  // process (see InlineFunction), so don't count it.
  // FIXME: This knowledge should really be encoded outside of FunctionInfo.
  if (Metrics.NumRets == 1)
    --Metrics.NumInsts;

  ArgumentWeights.reserve(F->arg_size());
  DenseMap<Value *, unsigned> PointerArgs;
  unsigned ArgIdx = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIdx) {
    // Count how much code can be eliminated if one of the arguments is
    // a constant or an alloca.
    ArgumentWeights.push_back(ArgInfo(countCodeReductionForConstant(Metrics, I),
                                      countCodeReductionForAlloca(Metrics, I)));

    // If the argument is a pointer, also check for pairs of pointers where
    // knowing a fixed offset between them allows simplification. This pattern
    // arises mostly due to STL algorithm patterns where pointers are used as
    // random access iterators.
    if (!I->getType()->isPointerTy())
      continue;
    PointerArgs[I] = ArgIdx;
    countCodeReductionForPointerPair(Metrics, PointerArgs, I, ArgIdx);
  }
}

/// NeverInline - returns true if the function should never be inlined into
/// any caller.
bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
  return (Metrics.exposesReturnsTwice || Metrics.isRecursive ||
          Metrics.containsIndirectBr);
}

// ConstantFunctionBonus - Figure out how much of a bonus we can get for
// possibly devirtualizing a function. We'll subtract the size of the function
// we may wish to inline from the indirect call bonus providing a limit on
// growth. Leave an upper limit of 0 for the bonus - we don't want to penalize
// inlining because we decide we don't want to give a bonus for
// devirtualizing.
int InlineCostAnalyzer::ConstantFunctionBonus(CallSite CS, Constant *C) {

  // This could just be NULL.
  if (!C) return 0;

  Function *F = dyn_cast<Function>(C);
  if (!F) return 0;

  int Bonus = InlineConstants::IndirectCallBonus + getInlineSize(CS, F);
  return (Bonus > 0) ? 0 : Bonus;
}

// CountBonusForConstant - Figure out an approximation for how much per-call
// performance boost we can expect if the specified value is constant.
int InlineCostAnalyzer::CountBonusForConstant(Value *V, Constant *C) {
  unsigned Bonus = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    User *U = *UI;
    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      // Turning an indirect call into a direct call is a BIG win.
      if (CI->getCalledValue() == V)
        Bonus += ConstantFunctionBonus(CallSite(CI), C);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      // Turning an indirect call into a direct call is a BIG win.
      if (II->getCalledValue() == V)
        Bonus += ConstantFunctionBonus(CallSite(II), C);
    }
    // FIXME: Eliminating conditional branches and switches should
    // also yield a per-call performance boost.
    else {
      // Figure out the bonuses that will accrue due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(*U);

      // We can't constant propagate instructions which have effects or
      // read memory.
      //
      // FIXME: It would be nice to capture the fact that a load from a
      // pointer-to-constant-global is actually a *really* good thing to zap.
      // Unfortunately, we don't know the pointer that may get propagated here,
      // so we can't make this decision.
      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
          isa<AllocaInst>(Inst))
        continue;

      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant)
        Bonus += CountBonusForConstant(&Inst);
    }
  }

  return Bonus;
}
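
// getInlineSize - Estimate the size cost of inlining this call site: the
// callee's instruction count plus call penalties, minus the simplifications
// we expect from constant, alloca, and paired-pointer arguments.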
int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  // InlineCost - This value measures how good an inline candidate this call
  // site is. A lower inline cost makes it more likely for the call to be
  // inlined. This value may go negative.
  int InlineCost = 0;

  // Compute any size reductions we can expect due to arguments being passed
  // into the function.
  unsigned ArgNo = 0;
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI, ++ArgNo) {
    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion,
    // and scalarization), so encourage the inlining of the function.
    if (isa<AllocaInst>(I))
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    else if (isa<Constant>(I))
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
  }
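
  // Subtract the recorded pair weight whenever two pointer arguments at this
  // call site are provably derived from the same underlying object, so their
  // relative offset is a compile-time constant.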
  const DenseMap<std::pair<unsigned, unsigned>, unsigned> &ArgPairWeights
    = CalleeFI->PointerArgPairWeights;
  for (DenseMap<std::pair<unsigned, unsigned>, unsigned>::const_iterator I
         = ArgPairWeights.begin(), E = ArgPairWeights.end();
       I != E; ++I)
    if (CS.getArgument(I->first.first)->stripInBoundsConstantOffsets() ==
        CS.getArgument(I->first.second)->stripInBoundsConstantOffsets())
      InlineCost -= I->second;

  // Each argument passed in has a cost at both the caller and the callee
  // sides. Measurements show that each argument costs about the same as an
  // instruction.
  InlineCost -= (CS.arg_size() * InlineConstants::InstrCost);

  // Now that we have considered all of the factors that make the call site
  // more likely to be inlined, look at factors that make us not want to
  // inline it.

  // Calls usually take a long time, so they make the inlining gain smaller.
  InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  // Look at the size of the callee. Each instruction counts as 5.
  InlineCost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;

  return InlineCost;
}
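
// getInlineBonuses - Compute call-site-specific adjustments: bonuses and
// penalties that depend on how this particular call site uses the callee,
// rather than on the callee's size.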
int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  bool isDirectCall = CS.getCalledFunction() == Callee;
  Instruction *TheCall = CS.getInstruction();
  int Bonus = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    Bonus += InlineConstants::LastCallToStaticBonus;
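  // (Bonuses are negative and penalties positive; the sum is added to the
  // size-based cost in getInlineCost, so a large negative value here makes
  // inlining very likely.)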

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Bonus += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    Bonus += InlineConstants::NoreturnPenalty;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus += InlineConstants::ColdccPenalty;

  // Add to the inline quality for properties that make the call valuable to
  // inline. This includes factors that indicate that the result of inlining
  // the function will be optimizable. Currently this just looks at arguments
  // passed into the function.
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI)
    // Compute any constant bonus due to inlining we want to give here.
    if (isa<Constant>(I))
      Bonus += CountBonusForConstant(FI, cast<Constant>(I));

  return Bonus;
}

// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               SmallPtrSet<const Function*, 16> &NeverInline) {
  return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               Function *Callee,
                               SmallPtrSet<const Function*, 16> &NeverInline) {
  Instruction *TheCall = CS.getInstruction();
  Function *Caller = TheCall->getParent()->getParent();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  // If we should never inline this, return a huge cost.
  if (CalleeFI->NeverInline())
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for. This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();

  if (CalleeFI->Metrics.usesDynamicAlloca) {
    // Get information about the caller.
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0) {
      CallerFI.analyzeFunction(Caller, TD);

      // Recompute the CalleeFI pointer; fetching the Caller entry could have
      // invalidated it.
      CalleeFI = &CachedFunctionInfo[Callee];
    }

    // Don't inline a callee with dynamic alloca into a caller without them.
    // Functions containing dynamic alloca's are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // InlineCost - This value measures how good an inline candidate this call
  // site is. A lower inline cost makes it more likely for the call to be
  // inlined. This value may go negative due to the fact that bonuses
  // are negative numbers.
  int InlineCost = getInlineSize(CS, Callee) + getInlineBonuses(CS, Callee);
  return llvm::InlineCost::get(InlineCost);
}

// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Get information about the callee.
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee, TD);

  float Factor = 1.0f;
  // Single BB functions are often written to be inlined.
  if (CalleeFI.Metrics.NumBlocks == 1)
    Factor += 0.5f;

  // Be more aggressive if the function contains a good chunk (if it makes up
  // at least 10% of the instructions) of vector instructions.
  if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/2)
    Factor += 2.0f;
  else if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/10)
    Factor += 1.5f;

  return Factor;
}

/// growCachedCostInfo - update the cached cost info for Caller after Callee
/// has been inlined.
void
InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
  CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;

  // For small functions we prefer to recalculate the cost for better accuracy.
  if (CallerMetrics.NumBlocks < 10 && CallerMetrics.NumInsts < 1000) {
    resetCachedCostInfo(Caller);
    return;
  }

  // For large functions, we can save a lot of computation time by skipping
  // recalculations.
  if (CallerMetrics.NumCalls > 0)
    --CallerMetrics.NumCalls;

  if (Callee == 0) return;

  CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;

  // If we don't have metrics for the callee, don't recalculate them just to
  // update an approximation in the caller. Instead, just recalculate the
  // caller info from scratch.
  if (CalleeMetrics.NumBlocks == 0) {
    resetCachedCostInfo(Caller);
    return;
  }

  // Since CalleeMetrics were already calculated, we know that the
  // CallerMetrics reference isn't invalidated: both were in the DenseMap.
  CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;

  // FIXME: If any of these three are true for the callee, the callee was
  // not inlined into the caller, so I think they're redundant here.
  CallerMetrics.exposesReturnsTwice |= CalleeMetrics.exposesReturnsTwice;
  CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
  CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;

  CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
  CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
  CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
  CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
  CallerMetrics.NumRets += CalleeMetrics.NumRets;

  // analyzeBasicBlock counts each function argument as an inst.
  if (CallerMetrics.NumInsts >= Callee->arg_size())
    CallerMetrics.NumInsts -= Callee->arg_size();
  else
    CallerMetrics.NumInsts = 0;

  // We are not updating the argument weights. We have already determined that
  // Caller is a fairly large function, so we accept the loss of precision.
}

/// clear - empty the cache of inline costs
void InlineCostAnalyzer::clear() {
  CachedFunctionInfo.clear();
}