#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
-// CountCodeReductionForConstant - Figure out an approximation for how many
-// instructions will be constant folded if the specified value is constant.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
- CountCodeReductionForConstant(Value *V) {
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (isa<BranchInst>(*UI) || isa<SwitchInst>(*UI)) {
- // We will be able to eliminate all but one of the successors.
- const TerminatorInst &TI = cast<TerminatorInst>(**UI);
- const unsigned NumSucc = TI.getNumSuccessors();
- unsigned Instrs = 0;
- for (unsigned I = 0; I != NumSucc; ++I)
- Instrs += TI.getSuccessor(I)->size();
- // We don't know which blocks will be eliminated, so use the average size.
- Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
- } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
- // Turning an indirect call into a direct call is a BIG win
- if (CI->getCalledValue() == V)
- Reduction += InlineConstants::IndirectCallBonus;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
- // Turning an indirect call into a direct call is a BIG win
- if (II->getCalledValue() == V)
- Reduction += InlineConstants::IndirectCallBonus;
- } else {
- // Figure out if this instruction will be removed due to simple constant
- // propagation.
- Instruction &Inst = cast<Instruction>(**UI);
-
- // We can't constant propagate instructions which have effects or
- // read memory.
- //
- // FIXME: It would be nice to capture the fact that a load from a
- // pointer-to-constant-global is actually a *really* good thing to zap.
- // Unfortunately, we don't know the pointer that may get propagated here,
- // so we can't make this decision.
- if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
- isa<AllocaInst>(Inst))
- continue;
-
- bool AllOperandsConstant = true;
- for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
- if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
- AllOperandsConstant = false;
- break;
- }
-
- if (AllOperandsConstant) {
- // We will get to remove this instruction...
- Reduction += InlineConstants::InstrCost;
-
- // And any other instructions that use it which become constants
- // themselves.
- Reduction += CountCodeReductionForConstant(&Inst);
- }
- }
-
- return Reduction;
-}
-
-// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
-// the function will be if it is inlined into a context where an argument
-// becomes an alloca.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
- CountCodeReductionForAlloca(Value *V) {
- if (!V->getType()->isPointerTy()) return 0; // Not a pointer
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *I = cast<Instruction>(*UI);
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += InlineConstants::InstrCost;
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has variable indices, we won't be able to do much with it.
- if (GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP);
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
- // Track pointer through bitcasts.
- Reduction += CountCodeReductionForAlloca(BCI);
- } else {
- // If there is some other strange instruction, we're not going to be able
- // to do much if we inline this.
- return 0;
- }
- }
-
- return Reduction;
-}
-
-// callIsSmall - If a call is likely to lower to a single target instruction, or
-// is otherwise deemed small return true.
-// TODO: Perhaps calls like memcpy, strcpy, etc?
-static bool callIsSmall(const Function *F) {
+/// callIsSmall - If a call is likely to lower to a single target instruction,
+/// or is otherwise deemed small, return true.
+/// TODO: Perhaps calls like memcpy, strcpy, etc?
+bool llvm::callIsSmall(const Function *F) {
if (!F) return false;
if (F->hasLocalLinkage()) return false;
StringRef Name = F->getName();
// These will all likely lower to a single selection DAG node.
- if (Name == "copysign" || Name == "copysignf" ||
+ if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
Name == "sin" || Name == "sinf" || Name == "sinl" ||
Name == "cos" || Name == "cosf" || Name == "cosl" ||
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
++NumBlocks;
-
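+  // Snapshot the running instruction count so this block's contribution can
+  // be recorded in NumBBInsts at the end of the loop.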
+ unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
II != E; ++II) {
if (isa<PHINode>(II)) continue; // PHI nodes don't count.
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
if (isa<DbgInfoIntrinsic>(II))
continue; // Debug intrinsics don't count as size.
-
- CallSite CS = CallSite::get(const_cast<Instruction*>(&*II));
-
+
+ ImmutableCallSite CS(cast<Instruction>(II));
+
// If this function contains a call to setjmp or _setjmp, never inline
// it. This is a hack because we depend on the user marking their local
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
- if (Function *F = CS.getCalledFunction())
+ if (const Function *F = CS.getCalledFunction()) {
+ // If a function is both internal and has a single use, then it is
+ // extremely likely to get inlined in the future (it was probably
+ // exposed by an interleaved devirtualization pass).
+ if (F->hasInternalLinkage() && F->hasOneUse())
+ ++NumInlineCandidates;
+
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
- NeverInline = true;
+ callsSetJmp = true;
+
+      // If this call is to the function itself, then the function is
+      // recursive.
+ // Inlining it into other functions is a bad idea, because this is
+ // basically just a form of loop peeling, and our metrics aren't useful
+ // for that case.
+ if (F == BB->getParent())
+ isRecursive = true;
+ }
if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
// Each argument to a call takes on average one instruction to set up.
NumInsts += CS.arg_size();
- ++NumCalls;
+
+ // We don't want inline asm to count as a call - that would prevent loop
+ // unrolling. The argument setup cost is still real, though.
+ if (!isa<InlineAsm>(CS.getCalledValue()))
+ ++NumCalls;
}
}
// jump would jump from the inlined copy of the function into the original
// function which is extremely undefined behavior.
if (isa<IndirectBrInst>(BB->getTerminator()))
- NeverInline = true;
+ containsIndirectBr = true;
+
+ // Remember NumInsts for this BB.
+ NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
+}
+
+// CountBonusForConstant - Figure out an approximation of the per-call
+// performance boost we can expect if the specified value is constant.
+unsigned CodeMetrics::CountBonusForConstant(Value *V) {
+ unsigned Bonus = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ User *U = *UI;
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
+ // Turning an indirect call into a direct call is a BIG win
+ if (CI->getCalledValue() == V)
+ Bonus += InlineConstants::IndirectCallBonus;
+    } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+      // Turning an indirect call into a direct call is a BIG win
+      if (II->getCalledValue() == V)
+        Bonus += InlineConstants::IndirectCallBonus;
+    } else {
+      // FIXME: Eliminating conditional branches and switches should
+      // also yield a per-call performance boost.
+
+      // Figure out the bonuses that will accrue due to simple constant
+      // propagation.
+      Instruction &Inst = cast<Instruction>(*U);
+
+ // We can't constant propagate instructions which have effects or
+ // read memory.
+ //
+ // FIXME: It would be nice to capture the fact that a load from a
+ // pointer-to-constant-global is actually a *really* good thing to zap.
+ // Unfortunately, we don't know the pointer that may get propagated here,
+ // so we can't make this decision.
+ if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+ isa<AllocaInst>(Inst))
+ continue;
+
+ bool AllOperandsConstant = true;
+ for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+ if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+ AllOperandsConstant = false;
+ break;
+ }
+
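+      // If this instruction folds away, its users may in turn become
+      // constants or direct calls, so recurse to collect their bonuses.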
+ if (AllOperandsConstant)
+ Bonus += CountBonusForConstant(&Inst);
+ }
+ }
+ return Bonus;
+}
+
+// CountCodeReductionForConstant - Figure out an approximation for how many
+// instructions will be constant folded if the specified value is constant.
+//
+unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
+ unsigned Reduction = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ User *U = *UI;
+ if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
+ // We will be able to eliminate all but one of the successors.
+ const TerminatorInst &TI = cast<TerminatorInst>(*U);
+ const unsigned NumSucc = TI.getNumSuccessors();
+ unsigned Instrs = 0;
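+      // Sum the per-block sizes recorded by analyzeBasicBlock; unlike
+      // BasicBlock::size(), these exclude PHIs and debug intrinsics.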
+ for (unsigned I = 0; I != NumSucc; ++I)
+ Instrs += NumBBInsts[TI.getSuccessor(I)];
+ // We don't know which blocks will be eliminated, so use the average size.
+ Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
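+      // For example, a 4-way switch whose successors total 40 instructions
+      // credits InstrCost*40*3/4, since on average three of the four blocks
+      // become dead.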
+ } else {
+ // Figure out if this instruction will be removed due to simple constant
+ // propagation.
+ Instruction &Inst = cast<Instruction>(*U);
+
+ // We can't constant propagate instructions which have effects or
+ // read memory.
+ //
+ // FIXME: It would be nice to capture the fact that a load from a
+ // pointer-to-constant-global is actually a *really* good thing to zap.
+ // Unfortunately, we don't know the pointer that may get propagated here,
+ // so we can't make this decision.
+ if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+ isa<AllocaInst>(Inst))
+ continue;
+
+ bool AllOperandsConstant = true;
+ for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+ if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+ AllOperandsConstant = false;
+ break;
+ }
+
+ if (AllOperandsConstant) {
+ // We will get to remove this instruction...
+ Reduction += InlineConstants::InstrCost;
+
+ // And any other instructions that use it which become constants
+ // themselves.
+ Reduction += CountCodeReductionForConstant(&Inst);
+ }
+ }
+ }
+ return Reduction;
+}
+
+// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
+// the function will be if it is inlined into a context where an argument
+// becomes an alloca.
+//
+unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
+ if (!V->getType()->isPointerTy()) return 0; // Not a pointer
+ unsigned Reduction = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ Instruction *I = cast<Instruction>(*UI);
+ if (isa<LoadInst>(I) || isa<StoreInst>(I))
+ Reduction += InlineConstants::InstrCost;
+ else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has variable indices, we won't be able to do much with it.
+ if (GEP->hasAllConstantIndices())
+ Reduction += CountCodeReductionForAlloca(GEP);
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+ // Track pointer through bitcasts.
+ Reduction += CountCodeReductionForAlloca(BCI);
+ } else {
+ // If there is some other strange instruction, we're not going to be able
+ // to do much if we inline this.
+ return 0;
+ }
+ }
+
+ return Reduction;
}
/// analyzeFunction - Fill in the current structure with information gleaned
// Don't bother calculating argument weights if we are never going to inline
// the function anyway.
- if (Metrics.NeverInline)
+ if (NeverInline())
return;
// Check out all of the arguments to the function, figuring out how much
// code can be eliminated if one of the arguments is a constant.
ArgumentWeights.reserve(F->arg_size());
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
- ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
- CountCodeReductionForAlloca(I)));
+ ArgumentWeights.push_back(ArgInfo(Metrics.CountCodeReductionForConstant(I),
+ Metrics.CountCodeReductionForAlloca(I),
+ Metrics.CountBonusForConstant(I)));
+}
+
+/// NeverInline - Returns true if the function should never be inlined into
+/// any caller.
+bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
+  return (Metrics.callsSetJmp || Metrics.isRecursive ||
+          Metrics.containsIndirectBr);
+}
+
+// getSpecializationBonus - The heuristic used to determine the per-call
+// performance boost for using a specialization of Callee with argument
+// specializedArgNo replaced by a constant.
+int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
+                                SmallVectorImpl<unsigned> &SpecializedArgNos) {
+ if (Callee->mayBeOverridden())
+ return 0;
+
+ int Bonus = 0;
+ // If this function uses the coldcc calling convention, prefer not to
+ // specialize it.
+ if (Callee->getCallingConv() == CallingConv::Cold)
+ Bonus -= InlineConstants::ColdccPenalty;
+
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
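+
+  // Each specialized argument contributes the constant-propagation bonus
+  // analyzeFunction precomputed for it.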
+  for (unsigned i = 0, s = SpecializedArgNos.size(); i != s; ++i)
+    Bonus += CalleeFI->ArgumentWeights[SpecializedArgNos[i]].ConstantBonus;
+
+ // Calls usually take a long time, so they make the specialization gain
+ // smaller.
+ Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+
+ return Bonus;
}
+
// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- SmallPtrSet<const Function *, 16> &NeverInline) {
+ SmallPtrSet<const Function*, 16> &NeverInline) {
+ return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
+}
+
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
+ Function *Callee,
+ SmallPtrSet<const Function*, 16> &NeverInline) {
Instruction *TheCall = CS.getInstruction();
- Function *Callee = CS.getCalledFunction();
Function *Caller = TheCall->getParent()->getParent();
+ bool isDirectCall = CS.getCalledFunction() == Callee;
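+  // Callee may differ from the call site's called value when evaluating a
+  // devirtualized candidate; the last-call-to-static bonus below only makes
+  // sense for genuinely direct calls, where inlining can kill the function.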
// Don't inline functions which can be redefined at link-time to mean
- // something else. Don't inline functions marked noinline.
+ // something else. Don't inline functions marked noinline or call sites
+ // marked noinline.
if (Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee))
+ Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
+ CS.isNoInline())
return llvm::InlineCost::getNever();
// InlineCost - This value measures how good of an inline candidate this call
// be inlined. This value may go negative.
//
int InlineCost = 0;
-
+
// If there is only one call of the function, and it has internal linkage,
// make it almost guaranteed to be inlined.
//
- if (Callee->hasLocalLinkage() && Callee->hasOneUse())
+ if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
InlineCost += InlineConstants::LastCallToStaticBonus;
// If this function uses the coldcc calling convention, prefer not to inline
} else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
InlineCost += InlineConstants::NoreturnPenalty;
- // Get information about the callee...
- FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
// If we haven't calculated this information yet, do so now.
- if (CalleeFI.Metrics.NumBlocks == 0)
- CalleeFI.analyzeFunction(Callee);
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
// If we should never inline this, return a huge cost.
- if (CalleeFI.Metrics.NeverInline)
+ if (CalleeFI->NeverInline())
return InlineCost::getNever();
- // FIXME: It would be nice to kill off CalleeFI.NeverInline. Then we
+ // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
// could move this up and avoid computing the FunctionInfo for
// things we are going to just return always inline for. This
// requires handling setjmp somewhere else, however.
if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
return InlineCost::getAlways();
- if (CalleeFI.Metrics.usesDynamicAlloca) {
- // Get infomation about the caller...
+ if (CalleeFI->Metrics.usesDynamicAlloca) {
+    // Get information about the caller.
FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
// If we haven't calculated this information yet, do so now.
- if (CallerFI.Metrics.NumBlocks == 0)
+ if (CallerFI.Metrics.NumBlocks == 0) {
CallerFI.analyzeFunction(Caller);
+
+      // Recompute the CalleeFI pointer; fetching the Caller's info may have
+      // grown the DenseMap and invalidated it.
+ CalleeFI = &CachedFunctionInfo[Callee];
+ }
// Don't inline a callee with dynamic alloca into a caller without them.
// Functions containing dynamic alloca's are inefficient in various ways;
// scalarization), so encourage the inlining of the function.
//
if (isa<AllocaInst>(I)) {
- if (ArgNo < CalleeFI.ArgumentWeights.size())
- InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
+ if (ArgNo < CalleeFI->ArgumentWeights.size())
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
// If this is a constant being passed into the function, use the argument
// weights calculated for the callee to determine how much will be folded
// away with this information.
} else if (isa<Constant>(I)) {
- if (ArgNo < CalleeFI.ArgumentWeights.size())
- InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
+ if (ArgNo < CalleeFI->ArgumentWeights.size())
+ InlineCost -= (CalleeFI->ArgumentWeights[ArgNo].ConstantWeight +
+ CalleeFI->ArgumentWeights[ArgNo].ConstantBonus);
}
}
// likely to be inlined, look at factors that make us not want to inline it.
// Calls usually take a long time, so they make the inlining gain smaller.
- InlineCost += CalleeFI.Metrics.NumCalls * InlineConstants::CallPenalty;
+ InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
- // Don't inline into something too big, which would make it bigger.
- // "size" here is the number of basic blocks, not instructions.
- //
- InlineCost += Caller->size()/15;
-
// Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI.Metrics.NumInsts*InlineConstants::InstrCost;
+ InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
return llvm::InlineCost::get(InlineCost);
}
+// getSpecializationCost - The heuristic used to determine the code-size
+// impact of creating a specialized version of Callee with argument
+// SpecializedArgNo replaced by a constant.
+InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
+                                SmallVectorImpl<unsigned> &SpecializedArgNos) {
+ // Don't specialize functions which can be redefined at link-time to mean
+ // something else.
+ if (Callee->mayBeOverridden())
+ return llvm::InlineCost::getNever();
+
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
+
+ int Cost = 0;
+
+  // Look at the original size of the callee. Each instruction counts as 5.
+ Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
+
+ // Offset that with the amount of code that can be constant-folded
+ // away with the given arguments replaced by constants.
+  for (SmallVectorImpl<unsigned>::iterator an = SpecializedArgNos.begin(),
+       ae = SpecializedArgNos.end(); an != ae; ++an)
+    Cost -= CalleeFI->ArgumentWeights[*an].ConstantWeight;
+
+ return llvm::InlineCost::get(Cost);
+}
+
// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
Function *Callee = CS.getCalledFunction();
- // Get information about the callee...
+ // Get information about the callee.
FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
// If we haven't calculated this information yet, do so now.
Factor += 1.5f;
return Factor;
}
+
+/// growCachedCostInfo - update the cached cost info for Caller after Callee has
+/// been inlined.
+void
+InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
+ CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
+
+  // For small functions, we prefer to recalculate the cost for better
+  // accuracy.
+ if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
+ resetCachedCostInfo(Caller);
+ return;
+ }
+
+ // For large functions, we can save a lot of computation time by skipping
+ // recalculations.
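+  // The call site that was just inlined no longer exists in the caller, so
+  // drop it from the cached call count.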
+ if (CallerMetrics.NumCalls > 0)
+ --CallerMetrics.NumCalls;
+
+ if (Callee == 0) return;
+
+ CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;
+
+ // If we don't have metrics for the callee, don't recalculate them just to
+ // update an approximation in the caller. Instead, just recalculate the
+ // caller info from scratch.
+ if (CalleeMetrics.NumBlocks == 0) {
+ resetCachedCostInfo(Caller);
+ return;
+ }
+
+  // Since CalleeMetrics was already calculated, looking it up above did not
+  // grow the DenseMap, so the CallerMetrics reference is still valid.
+ CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
+
+ // FIXME: If any of these three are true for the callee, the callee was
+ // not inlined into the caller, so I think they're redundant here.
+ CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
+ CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
+ CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
+
+ CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
+ CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
+ CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
+ CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
+ CallerMetrics.NumRets += CalleeMetrics.NumRets;
+
+ // analyzeBasicBlock counts each function argument as an inst.
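+  // After inlining, the inlined call's argument setup disappears, so remove
+  // that cost from the caller's count.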
+ if (CallerMetrics.NumInsts >= Callee->arg_size())
+ CallerMetrics.NumInsts -= Callee->arg_size();
+ else
+ CallerMetrics.NumInsts = 0;
+
+ // We are not updating the argument weights. We have already determined that
+ // Caller is a fairly large function, so we accept the loss of precision.
+}
+
+/// clear - Empty the cache of inline costs.
+void InlineCostAnalyzer::clear() {
+ CachedFunctionInfo.clear();
+}